diff --git a/.gitattributes b/.gitattributes index 6b1627480f24b470c618e9d4b64ef9f0a65950d8..ce989e83ba3113f46754c2d01f65b25ee89689a0 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1109,3 +1109,11 @@ data/2025/2504_12xxx/2504.12532/64bfdedb-3c59-46bb-be2f-711a9c591fc4_origin.pdf data/2025/2504_12xxx/2504.12597/6bba0264-76cf-48c3-b57c-10e72c522273_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_12xxx/2504.12609/9901d2ed-1a6e-4cf8-b052-065c8865ea5c_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_12xxx/2504.12636/9a12e4e5-302c-4454-96ab-c5e493e2dce0_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_12xxx/2504.12285/2c3f7ef8-ab61-4b87-a7bf-c49da203744d_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_12xxx/2504.12369/3a2c027d-7926-4954-8a0d-d286f9d6a3ea_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_12xxx/2504.12401/344c7717-406a-4426-bbde-928913ffd40c_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_12xxx/2504.12408/afaa1baf-15c0-4d8f-949b-b6edf18db129_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_12xxx/2504.12459/9956eeb3-00c8-414d-bf51-c94c41a6c788_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_12xxx/2504.12492/7b5fffe0-9106-451e-9d83-f8c53a7c05c0_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_12xxx/2504.12680/04e3b704-b5e8-4635-9499-209e4f014c85_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_13xxx/2504.13958/a08029cb-87cf-4923-9173-7614aa70a4ec_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/data/2025/2504_12xxx/2504.12285/2c3f7ef8-ab61-4b87-a7bf-c49da203744d_content_list.json b/data/2025/2504_12xxx/2504.12285/2c3f7ef8-ab61-4b87-a7bf-c49da203744d_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..c6d8ca52c1c9e5dfb4242e2a733c5f71f4415a77 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12285/2c3f7ef8-ab61-4b87-a7bf-c49da203744d_content_list.json @@ -0,0 +1,1518 @@ +[ + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 250, + 537, + 265 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We introduce BitNet b1.58 2B4T, the first open-source, native 1-bit Large Language Model (LLM) at the 2-billion parameter scale. Trained on a corpus of 4 trillion tokens, the model has been rigorously evaluated across benchmarks covering language understanding, mathematical reasoning, coding proficiency, and conversational ability. Our results demonstrate that BitNet b1.58 2B4T achieves performance on par with leading open-weight, full-precision LLMs of similar size, while offering significant advantages in computational efficiency, including substantially reduced memory footprint, energy consumption, and decoding latency. 
To facilitate further research and adoption, the model weights are released via Hugging Face along with open-source inference implementations for both GPU and CPU architectures.", + "bbox": [ + 228, + 279, + 769, + 434 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "BitNet b1.58 2B4T (1.58-bit): bitnet-b1.58-2B-4T", + "bbox": [ + 233, + 441, + 580, + 454 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The packed weight of BitNet b1.58 2B4T, used for inference only", + "bbox": [ + 254, + 455, + 638, + 468 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "BitNet b1.58 2B4T (bf16): bitnet-b1.58-2B-4T-bf16", + "bbox": [ + 233, + 473, + 599, + 487 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The master weight of BitNet b1.58 2B4T, used for training only", + "bbox": [ + 254, + 487, + 630, + 501 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "BitNet b1.58 2B4T (gguf): bitnet-b1.58-2B-4T-gguf", + "bbox": [ + 233, + 505, + 598, + 518 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The GGUF format of BitNet b1.58 2B4T, used for bitnet.cpp", + "bbox": [ + 254, + 518, + 614, + 534 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "BitNet b1.58 2B4T Code: bitnet.cpp Demo: aka.ms/bitnet-demo", + "bbox": [ + 233, + 539, + 710, + 554 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/e9b0504f3305e06d140af96f6c0e0d1ce952c56b2f03e24d6adcb32b50b7eb16.jpg", + "image_caption": [ + "Figure 1: BitNet b1.58 2B4T advances the Pareto frontier defined by leading open-weight LLMs under 3B parameters in terms of performance versus memory, demonstrating superior efficiency." + ], + "image_footnote": [], + "bbox": [ + 222, + 561, + 769, + 824 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "BitNet b1.58 2B4T Technical Report", + "bbox": [ + 277, + 122, + 720, + 148 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Shuming Ma* Hongyu Wang* Shaohan Huang Xingxing Zhang Ying Hu Ting Song Yan Xia Furu Wei https://aka.ms/GeneralAI", + "bbox": [ + 266, + 200, + 733, + 243 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Equal contribution. ⋆ Corresponding author. S. Ma, S. Huang, X. Zhang, T. Song, Y. Xia and F. Wei are with Microsoft Research. H. Wang is with University of Chinese Academy of Sciences. Y. Hu is with Tsinghua University.", + "bbox": [ + 169, + 872, + 823, + 912 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.12285v2 [cs.CL] 25 Apr 2025", + "bbox": [ + 22, + 265, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 173, + 89, + 312, + 104 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Open-source large language models (LLMs) have become pivotal in democratizing access to advanced AI capabilities, fostering innovation, and enabling research across diverse fields such as natural language processing, code generation, and vision computing (Dubey et al., 2024; Yang et al., 2024; Bai et al., 2025). Their public availability allows for widespread experimentation and adaptation. However, a significant barrier hinders their broader adoption: the substantial computational resources required for deployment and inference. 
State-of-the-art open LLMs typically require large memory footprints, consume considerable energy, and exhibit notable inference latency, rendering them impractical for many edge devices, resource-constrained environments, and real-time applications.", + "bbox": [ + 169, + 119, + 826, + 232 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1-bit LLMs, representing an extreme yet promising form of model quantization where weights and potentially activations are constrained to binary $\\{-1, +1\\}$ or ternary $\\{-1, 0, +1\\}$, offer a compelling solution to the efficiency challenges. By drastically reducing the memory required to store weights and enabling highly efficient bitwise computations, they have the potential to significantly lower deployment costs, reduce energy consumption, and accelerate inference speeds. While prior work has explored 1-bit models, existing open efforts often fall into two categories: 1) post-training quantization (PTQ) methods applied to pre-trained full-precision models, which can lead to significant performance degradation (Xu et al., 2024b; Team, 2024), or 2) native 1-bit models (trained from scratch with 1-bit weights) that have been developed at relatively smaller scales (e.g., OLMo-Bitnet-1B²) and may not yet match the capabilities of larger, full-precision counterparts. This performance gap has limited the practical impact of 1-bit LLMs thus far.", + "bbox": [ + 169, + 237, + 826, + 391 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To bridge this gap between efficiency and performance, we introduce BitNet b1.58 2B4T, the first open-source, native 1-bit LLM trained at scale. This model, comprising 2 billion parameters, was trained from scratch on a substantial dataset of 4 trillion tokens, leveraging architectural and training innovations specific to the 1-bit paradigm. The core contribution of this work is to demonstrate that a native 1-bit LLM, when trained effectively at scale, can achieve performance comparable to leading open-weight, full-precision models of similar size across a wide range of tasks.", + "bbox": [ + 169, + 396, + 823, + 481 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This technical report details the development and evaluation of BitNet b1.58 2B4T. We describe the architecture and training methodology, and then present comprehensive evaluation results on standard benchmarks assessing language understanding, mathematical reasoning, coding proficiency, and multi-turn conversational abilities. Our findings confirm its strong performance relative to established full-precision baselines, coupled with significant advantages in efficiency. Finally, we announce the public release of the BitNet b1.58 2B4T model weights via Hugging Face and provide open-source inference code optimized for both GPU and CPU execution, aiming to facilitate further research and the practical deployment of highly efficient LLMs.", + "bbox": [ + 169, + 484, + 825, + 599 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Architecture", + "text_level": 1, + "bbox": [ + 171, + 617, + 313, + 633 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The architecture of BitNet b1.58 2B4T is derived from the standard Transformer model (Vaswani et al., 2017), incorporating significant modifications based on the BitNet framework (Wang et al., 2023a; Ma et al., 2024). 
The model is trained entirely from scratch.", + "bbox": [ + 169, + 648, + 826, + 691 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The core architectural innovation lies in replacing the standard full-precision linear layers (torch.nn.Linear) with custom BitLinear layers. This constitutes the foundation of the BitNet approach. Within these BitLinear layers:", + "bbox": [ + 169, + 696, + 823, + 739 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Weight Quantization: Model weights are quantized to 1.58 bits during the forward pass. This is achieved using an absolute mean (absmean) quantization scheme, which maps weights to ternary values $\\{-1, 0, +1\\}$. This drastically reduces the model size and enables efficient mathematical operations.", + "- Activation Quantization: Activations flowing through the linear projection are quantized to 8-bit integers. This employs an absolute maximum (absmax) quantization strategy, applied per-token.", + "- Normalization: We incorporate subln normalization (Wang et al., 2022) to further enhance training stability, which can be particularly beneficial in quantized training regimes." + ], + "bbox": [ + 215, + 750, + 823, + 885 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "2https://huggingface.co/NousResearch/OLMo-Bitnet-1B", + "bbox": [ + 192, + 896, + 589, + 911 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Beyond the BitLinear layers, several established LLM techniques are integrated to enhance performance and stability:", + "bbox": [ + 171, + 90, + 823, + 119 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Activation Function (FFN): Within the feed-forward network (FFN) sub-layers, instead of the commonly used SwiGLU activation (Shazeer, 2020), BitNet b1.58 2B4T employs squared ReLU $(\mathrm{ReLU}^2)$. This choice is motivated by its potential to improve model sparsity and computational characteristics within the 1-bit context (Wang et al., 2024b,a).", + "- Positional Embeddings: Rotary Position Embeddings (RoPE) (Su et al., 2024) are used to inject positional information, a standard practice in modern high-performance LLMs.", + "- Bias Removal: Consistent with architectures like LLaMA, all bias terms are removed from the linear layers and normalization layers throughout the network, reducing parameter count and potentially simplifying quantization." + ], + "bbox": [ + 215, + 131, + 821, + 266 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "For tokenization, we adopt the tokenizer developed for LLaMA 3 (Dubey et al., 2024). This tokenizer implements a byte-level Byte-Pair Encoding (BPE) scheme with a vocabulary size of 128,256 tokens. This choice ensures robust handling of diverse text and code, and its widespread adoption facilitates straightforward integration with existing open-source tooling and ecosystems.", + "bbox": [ + 169, + 280, + 823, + 335 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Training", + "text_level": 1, + "bbox": [ + 171, + 357, + 279, + 375 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The training process for BitNet b1.58 2B4T involved three distinct phases: large-scale pre-training followed by supervised fine-tuning (SFT) and direct preference optimization (DPO). 
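", + "page_idx": 2 + }, + { + "type": "text", + "text": "As a concrete illustration of the BitLinear scheme described in Section 2, the following PyTorch-style sketch shows one direct reading of absmean weight quantization and per-token absmax activation quantization. It is a minimal sketch of the scheme as described, not the released implementation; the function names are illustrative, and the straight-through estimator used during training is omitted.", + "page_idx": 2 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "import torch\n\ndef absmean_weight_quant(w):\n    # Absmean scheme: scale by mean |W|, then round to ternary {-1, 0, +1}.\n    scale = w.abs().mean().clamp(min=1e-5)\n    return (w / scale).round().clamp(-1, 1), scale\n\ndef absmax_activation_quant(x):\n    # Per-token absmax scheme: scale each token vector into the int8 range.\n    scale = x.abs().amax(dim=-1, keepdim=True).clamp(min=1e-5) / 127.0\n    return (x / scale).round().clamp(-128, 127), scale\n\ndef bitlinear_forward(x, w):\n    # W1.58A8 matmul: quantized operands, output rescaled by both factors.\n    wq, w_scale = absmean_weight_quant(w)\n    xq, x_scale = absmax_activation_quant(x)\n    return (xq @ wq.t()) * x_scale * w_scale", + "guess_lang": "python", + "page_idx": 2 + }, + { + "type": "text", + "text": "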
While advanced techniques like Proximal Policy Optimization (PPO) or Group Relative Policy Optimization (GRPO) can further enhance capabilities such as mathematics and chain-of-thought reasoning (Schulman et al., 2017; Shao et al., 2024), the current version of BitNet b1.58 2B4T relies solely on pre-training, SFT, and DPO. The exploration of reinforcement learning methods remains a direction for future work.", + "bbox": [ + 169, + 388, + 823, + 484 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 Pre-training", + "text_level": 1, + "bbox": [ + 171, + 503, + 300, + 518 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The pre-training phase aimed to imbue the model with broad world knowledge and foundational language capabilities. We adapted general training strategies from established LLM practices (Dubey et al., 2024), with specific adjustments tailored for the 1-bit architecture.", + "bbox": [ + 169, + 529, + 823, + 571 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1.1 Learning Rate Schedule", + "text_level": 1, + "bbox": [ + 171, + 587, + 393, + 602 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A two-stage learning rate schedule was employed.", + "bbox": [ + 171, + 612, + 503, + 627 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. **Stage 1 (High Learning Rate):** The initial phase utilized a standard cosine decay schedule but commenced with a relatively high peak learning rate. This decision was informed by the observation that 1-bit models often exhibit greater training stability compared to their full-precision counterparts, allowing for more aggressive initial learning steps.", + "2. **Stage 2 (Cooldown):** Approximately midway through the planned training token count, the learning rate was abruptly decayed and subsequently maintained via a cosine schedule with a significantly lower peak value. This \"cooldown\" phase allows the model to refine its representations on higher-quality data (see Section 3.1.3)." + ], + "bbox": [ + 207, + 638, + 821, + 752 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1.2 Weight Decay Schedule", + "text_level": 1, + "bbox": [ + 171, + 771, + 388, + 785 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Complementing the learning rate adjustments, a two-stage weight decay strategy was implemented.", + "bbox": [ + 171, + 795, + 821, + 810 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. **Stage 1:** During the first training stage, weight decay followed a cosine schedule, reaching a peak value of 0.1. This regularization helps prevent overfitting during the initial high learning-rate phase.", + "2. **Stage 2:** In the second stage, weight decay was effectively disabled (set to zero). This allows the model parameters to settle into finer-grained optima guided by the lower learning rate and curated data." + ], + "bbox": [ + 207, + 821, + 821, + 907 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1.3 Pre-training Data", + "text_level": 1, + "bbox": [ + 171, + 90, + 349, + 106 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The pre-training corpus comprised a mixture of publicly available text and code datasets, including large web crawls like DCLM (Li et al., 2024b) and educational web pages like FineWeb-EDU (Penedo et al., 2024). 
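", + "page_idx": 3 + }, + { + "type": "text", + "text": "The two-stage learning rate and weight decay schedules of Sections 3.1.1 and 3.1.2 can be summarized in a short sketch, assuming simple cosine forms; the peak learning rates and step counts below are illustrative placeholders rather than the actual training configuration, which this report does not publish.", + "page_idx": 3 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "import math\n\nTOTAL_STEPS = 100_000      # placeholder; the real budget follows from the 4T-token corpus\nSPLIT = TOTAL_STEPS // 2   # cooldown begins roughly midway through training\nPEAK_LR_STAGE1 = 1.5e-3    # hypothetical high peak for Stage 1\nPEAK_LR_STAGE2 = 2.5e-4    # hypothetical lower peak for Stage 2\n\ndef learning_rate(step):\n    if step < SPLIT:  # Stage 1: cosine decay from a relatively high peak\n        return 0.5 * PEAK_LR_STAGE1 * (1 + math.cos(math.pi * step / SPLIT))\n    s = step - SPLIT  # Stage 2: abrupt drop, then a lower-peak cosine decay\n    return 0.5 * PEAK_LR_STAGE2 * (1 + math.cos(math.pi * s / (TOTAL_STEPS - SPLIT)))\n\ndef weight_decay(step):\n    if step < SPLIT:  # Stage 1: cosine schedule reaching a peak value of 0.1\n        return 0.05 * (1 - math.cos(math.pi * step / SPLIT))\n    return 0.0        # Stage 2: weight decay disabled", + "guess_lang": "python", + "page_idx": 3 + }, + { + "type": "text", + "text": "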
To enhance mathematical reasoning abilities, we also incorporated synthetically generated mathematical data. The data presentation strategy aligned with the two-stage training: the bulk of general web data was processed during Stage 1, while higher-quality curated datasets were emphasized during the Stage 2 cooldown phase, coinciding with the reduced learning rate.", + "bbox": [ + 169, + 114, + 823, + 199 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 Supervised Fine-tuning (SFT)", + "text_level": 1, + "bbox": [ + 171, + 214, + 419, + 229 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Following pre-training, the model underwent supervised fine-tuning (SFT) to enhance its instruction-following capabilities and improve its performance in conversational interaction formats.", + "bbox": [ + 169, + 239, + 826, + 268 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.1 SFT Data", + "text_level": 1, + "bbox": [ + 171, + 282, + 294, + 297 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The SFT phase utilized a diverse collection of publicly available instruction-following and conversational datasets. These included, but were not limited to, WildChat (Zhao et al., 2024), LMSYS-Chat-1M (Zheng et al., 2024), WizardLM Evol-Instruct (Xu et al., 2024a), and SlimOrca (Lian et al., 2023). To further bolster specific capabilities, particularly in reasoning and complex instruction adherence, we supplemented these with synthetic datasets generated using methodologies like GLAN (Li et al., 2024a) and MathScale (Tang et al., 2024).", + "bbox": [ + 169, + 306, + 826, + 391 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.2 Chat Template", + "text_level": 1, + "bbox": [ + 171, + 405, + 330, + 420 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For conversational tasks during SFT and inference, the following chat template structure was employed:", + "bbox": [ + 169, + 429, + 826, + 458 + ], + "page_idx": 3 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "<|begin_of_text|>System: {system_message}<|eot_id|>\nUser: {user_message_1}<|eot_id|>\nAssistant: {assistant_message_1}<|eot_id|>\nUser: {user_message_2}<|eot_id|>\nAssistant: {assistant_message_2}<|eot_id|>...", + "guess_lang": "txt", + "bbox": [ + 169, + 468, + 612, + 539 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.3 Optimization Details", + "text_level": 1, + "bbox": [ + 171, + 553, + 370, + 568 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Several optimization choices were key during SFT:", + "bbox": [ + 171, + 577, + 509, + 592 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Loss Aggregation: Instead of averaging the cross-entropy loss across tokens within a batch (mean reduction), we employed summation. Empirically, we observed that summing the losses led to improved convergence and better final performance for this model.", + "- Hyperparameter Tuning: Careful tuning of the learning rate and the number of training epochs was performed. Consistent with our pre-training findings, the 1-bit model benefited from a relatively larger learning rate during SFT compared to typical full-precision model fine-tuning. Furthermore, achieving optimal convergence required extending the fine-tuning duration over a larger number of epochs than full-precision models of similar size."
+ ], + "bbox": [ + 215, + 603, + 825, + 719 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3 Direct Preference Optimization (DPO)", + "text_level": 1, + "bbox": [ + 171, + 734, + 480, + 751 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To further align the model's behavior with human preferences regarding helpfulness and safety, we applied Direct Preference Optimization (DPO) (Rafailov et al., 2023) following the SFT phase. DPO offers an efficient alternative to traditional RLHF by directly optimizing the language model using preference data, thereby circumventing the need to train a separate reward model. This DPO stage served to refine the model's conversational prowess and overall alignment with desired interaction patterns in practical use cases.", + "bbox": [ + 169, + 761, + 825, + 844 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3.1 Training Data", + "text_level": 1, + "bbox": [ + 171, + 859, + 323, + 875 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The preference dataset used for DPO training was constructed from a combination of publicly available resources recognized for capturing diverse human judgments on model outputs. Specifically,", + "bbox": [ + 169, + 883, + 826, + 912 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "we utilized UltraFeedback (Cui et al., 2024) and MagPie (Xu et al., 2024c). The aggregation of these datasets provided a robust and multifaceted preference signal, guiding the model towards generating responses more aligned with human expectations.", + "bbox": [ + 169, + 90, + 823, + 133 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3.2 Training Details", + "text_level": 1, + "bbox": [ + 171, + 147, + 339, + 162 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The DPO training phase was conducted for 2 epochs. We employed a learning rate of $2 \\times 10^{-7}$ and set the DPO beta parameter, which controls the divergence from the reference policy, to 0.1. To enhance training efficiency during this phase, we integrated optimized kernels from the Liger Kernel library (Hsu et al., 2024). 
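", + "page_idx": 4 + }, + { + "type": "text", + "text": "With these settings, the DPO objective of Rafailov et al. (2023) reduces to a few lines. The sketch below assumes that sequence-level log-probabilities have already been computed for each chosen and rejected response under the policy and the frozen reference model; the function and variable names are ours, not taken from our training code.", + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "import torch.nn.functional as F\n\ndef dpo_loss(pi_chosen, pi_rejected, ref_chosen, ref_rejected, beta=0.1):\n    # Inputs are summed log-probs of each response under the policy and the\n    # reference model; beta (0.1 here) controls divergence from the reference.\n    margin = (pi_chosen - ref_chosen) - (pi_rejected - ref_rejected)\n    return -F.logsigmoid(beta * margin).mean()", + "guess_lang": "python", + "page_idx": 4 + }, + { + "type": "text", + "text": "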
Qualitatively, our observations indicate that the DPO process effectively steered the model towards preferred response styles without inducing significant degradation in the core capabilities established during pre-training and SFT.", + "bbox": [ + 169, + 170, + 826, + 255 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 Evaluation", + "text_level": 1, + "bbox": [ + 171, + 273, + 297, + 289 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We measure performance on a wide variety of benchmarks classified as follows:", + "bbox": [ + 169, + 303, + 699, + 319 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Language understanding and reasoning: ARC-Easy (Yadav et al., 2019), ARC-Challenge (Yadav et al., 2019), HellaSwag (Zellers et al., 2019), WinoGrande (Sakaguchi et al., 2020), PIQA (Bisk et al., 2019), OpenbookQA (Mihaylov et al., 2018), and CommonsenseQA (Talmor et al., 2019)", + "- World knowledge: TruthfulQA (Lin et al., 2022) and MMLU (Hendrycks et al., 2021a)", + "- Reading comprehension: TriviaQA (Joshi et al., 2017) and BoolQ (Clark et al., 2019)", + "- Math and code: GSM8K (Cobbe et al., 2021), MATH-500 (Hendrycks et al., 2021b) and HumanEval+ (Liu et al., 2023)", + "- Instruction following and conversation: IFEval (Zhou et al., 2023) and MT-bench (Zheng et al., 2023)" + ], + "bbox": [ + 215, + 329, + 823, + 484 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We compare BitNet b1.58 2B4T with leading open-weight full-precision LLMs of similar size, including LLaMA 3.2 1B (Dubey et al., 2024), Gemma-3 1B (Team et al., 2025), Qwen2.5 1.5B (Yang et al., 2024), SmolLM2 1.7B (Allal et al., 2025) and MiniCPM 2B (Hu et al., 2024). All models are instruction-tuned versions. We re-run all benchmarks with a public evaluation pipeline for a fair comparison. More evaluation details are available in the appendix. The main results are presented in Table 1.", + "bbox": [ + 169, + 496, + 826, + 580 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 Main Results", + "text_level": 1, + "bbox": [ + 171, + 595, + 305, + 609 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As shown in Table 1, BitNet b1.58 2B4T demonstrates remarkable resource efficiency. Its non-embedding memory footprint and estimated energy consumption (Horowitz, 2014; Zhang et al., 2022) during decoding are substantially lower than those of all the full-precision models evaluated, highlighting a significant advantage in operational cost and deployability on resource-constrained devices.", + "bbox": [ + 169, + 619, + 826, + 690 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In terms of task performance, BitNet b1.58 2B4T proves highly competitive. It achieves the best results among the compared models on several benchmarks spanning reasoning, knowledge, and math capabilities. On other benchmarks, its performance is closely comparable to the top-performing full-precision models. While some full-precision models show slight advantages on specific tasks or the overall average, BitNet b1.58 2B4T delivers strong performance across the board. 
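", + "page_idx": 4 + }, + { + "type": "text", + "text": "A note on the efficiency figures in Table 1: the energy column follows the arithmetic operations energy (AOE) model detailed in Appendix B, with per-operation 7nm costs given in Table 4. The sketch below shows one way to write that estimate; counting a ternary-weight matrix multiplication as INT8 additions only is our reading of the BitNet line of work, not a procedure stated explicitly in this report.", + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "# Per-operation energy in pJ at 7nm process nodes (Table 4).\nENERGY_PJ = {\n    'fp16': {'add': 0.16, 'mul': 0.34},\n    'int8': {'add': 0.007, 'mul': 0.07},\n}\n\ndef matmul_energy_pj(m, k, n, dtype='fp16'):\n    # An (m x k) @ (k x n) matmul performs m*n*k MULs and m*n*(k-1) ADDs.\n    e = ENERGY_PJ[dtype]\n    return m * n * k * e['mul'] + m * n * (k - 1) * e['add']\n\ndef ternary_matmul_energy_pj(m, k, n):\n    # With {-1, 0, +1} weights, multiplications reduce to sign flips or skips,\n    # leaving int8 accumulation (an assumption following the BitNet papers).\n    return m * n * (k - 1) * ENERGY_PJ['int8']['add']", + "guess_lang": "python", + "page_idx": 4 + }, + { + "type": "text", + "text": "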
The results indicate that BitNet b1.58 2B4T achieves capabilities nearly on par with leading models in its size class while offering dramatically improved efficiency.", + "bbox": [ + 169, + 696, + 826, + 794 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2 Comparison with Post-training Quantized Models", + "text_level": 1, + "bbox": [ + 169, + 809, + 562, + 825 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We further investigate the efficiency-performance trade-off by comparing BitNet b1.58 2B4T against post-training quantized (PTQ) versions of a leading competitor, Qwen2.5 1.5B, using standard INT4 methods (GPTQ and AWQ). The results are summarized in Table 2.", + "bbox": [ + 169, + 834, + 825, + 878 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "While INT4 quantization successfully reduces the memory footprint of the full-precision model, BitNet b1.58 2B4T achieves an even lower memory requirement due to its native 1-bit architecture.", + "bbox": [ + 169, + 882, + 828, + 912 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/571146886c535edf30d81d1772d84f416f8ac854969e5314285b8b400728c4d3.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Benchmark (Metric) | LLaMA 3.2 1B | Gemma-3 1B | Qwen2.5 1.5B | SmolLM2 1.7B | MiniCPM 2B | BitNet b1.58 2B
Memory (Non-emb) | 2GB | 1.4GB | 2.6GB | 3.2GB | 4.8GB | 0.4GB
Latency (CPU; TPOT) | 48ms | 41ms | 65ms | 67ms | 124ms | 29ms
Energy (Estimated) | 0.258J | 0.186J | 0.347J | 0.425J | 0.649J | 0.028J
Training Tokens (Pre-training) | 9T (pruning & distillation) | 2T (distillation) | 18T | 11T | 1.1T | 4T
ARC-Challenge (0-shot; Acc,norm) | 37.80 | 38.40 | 46.67 | 43.52 | 44.80 | 49.91
ARC-Easy (0-shot; Acc,norm) | 63.17 | 63.13 | 76.01 | 62.92 | 72.14 | 74.79
OpenbookQA (0-shot; Acc,norm) | 34.80 | 38.80 | 40.80 | 46.00 | 40.20 | 41.60
BoolQ (0-shot; Acc) | 64.65 | 74.22 | 78.04 | 75.78 | 80.67 | 80.18
HellaSwag (0-shot; Acc,norm) | 60.80 | 57.69 | 68.28 | 71.71 | 70.81 | 68.44
PIQA (0-shot; Acc,norm) | 74.21 | 71.93 | 76.12 | 76.12 | 76.66 | 77.09
WinoGrande (0-shot; Acc) | 59.51 | 58.48 | 62.83 | 68.98 | 61.80 | 71.90
CommonsenseQA (10-shot; Acc) | 58.48 | 42.10 | 76.41 | 63.55 | 71.74 | 71.58
TruthfulQA (10-shot; MC2) | 43.80 | 38.66 | 46.67 | 39.90 | 41.41 | 45.31
TriviaQA (5-shot; EM) | 37.60 | 23.49 | 38.37 | 45.97 | 34.13 | 33.57
MMLU (5-shot; Acc) | 45.58 | 39.91 | 60.25 | 49.24 | 51.82 | 53.17
HumanEval+ (0-shot; Pass@1) | 31.10 | 37.20 | 50.60 | 28.00 | 43.90 | 38.40
GSM8K (4-shot; EM) | 38.21 | 31.16 | 56.79 | 45.11 | 4.40 | 58.38
MATH-500 (0-shot; EM) | 23.00 | 42.00 | 53.00 | 17.60 | 14.80 | 43.40
IFEval (0-shot; Instruct-Strict) | 62.71 | 66.67 | 50.12 | 57.91 | 36.81 | 53.48
MT-bench (0-shot; Average) | 5.43 | 6.40 | 6.12 | 5.50 | 6.57 | 5.85
Average | 44.90 | 43.74 | 55.23 | 48.70 | 42.05 | 54.19
", + "bbox": [ + 181, + 87, + 812, + 672 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 1: Comparison of BitNet b1.58 2B4T with leading open-weight full-precision LLMs of similar size (1B-2B parameters) on efficiency metrics and performance across a wide range of benchmarks. All models compared are instruction-tuned versions.", + "bbox": [ + 169, + 676, + 823, + 719 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "More importantly, this superior memory efficiency does not compromise performance relative to the quantized models. Standard PTQ techniques lead to a noticeable degradation in performance compared to the original full-precision model. In contrast, BitNet b1.58 2B4T maintains stronger overall performance than the INT4 quantized versions of Qwen2.5-1.5B on the evaluated benchmarks. This comparison suggests that BitNet b1.58 2B4T represents a more favorable point on the efficiency-performance curve than applying conventional INT4 PTQ to existing architectures, offering better performance with lower resource usage.", + "bbox": [ + 169, + 813, + 823, + 912 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 503, + 946 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/ef5eaeee5358d095388e3666899372dcd05f08f5db7a5f88b8a1fcf76af24244.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Benchmark (Metric) | Qwen2.5 1.5B-bf16 | Qwen2.5 1.5B-GPTQ-int4 | Qwen2.5 1.5B-AWQ-int4 | BitNet b1.58 2B
Memory (Non-emb) | 2.6GB | 0.7GB | 0.7GB | 0.4GB
Activation | bf16 | bf16 | bf16 | int8
MMLU (5-shot; Acc) | 60.25 | 58.06 | 57.43 | 53.17
GSM8K (4-shot; EM) | 56.79 | 50.57 | 50.64 | 58.38
IFEval (0-shot; Instruct-Strict) | 50.12 | 47.84 | 45.44 | 53.48
Average | 55.72 | 52.15 | 51.17 | 55.01
", + "bbox": [ + 204, + 88, + 790, + 282 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/7fdcbcc3b50ac408ac7c07af7c01e1a337e1a44a092a38fa5c43f53314bca52d.jpg", + "table_caption": [ + "Table 2: Comparison of BitNet b1.58 (2B) against Qwen2.5 1.5B in its original bf16 precision and after INT4 post-training quantization (GPTQ and AWQ). All models shown are based on instruction-tuned checkpoints." + ], + "table_footnote": [], + "table_body": "
Benchmark (Metric) | Bonsai 0.5B | OLMo-Bitnet 1B | Falcon3-1.58bit 7B | Llama3-8B-1.58 8B | BitNet b1.58 2B
Native 1-bit | ✓ | ✓ | ✗ | ✗ | ✓
ARC-Challenge (0-shot; Acc,norm) | 33.19 | 26.54 | 37.80 | 43.69 | 49.91
ARC-Easy (0-shot; Acc,norm) | 58.25 | 25.38 | 65.03 | 70.71 | 74.79
OpenbookQA (0-shot; Acc,norm) | 33.60 | 28.20 | 38.20 | 37.20 | 41.60
BoolQ (0-shot; Acc) | 58.44 | 52.48 | 72.14 | 68.38 | 80.18
HellaSwag (0-shot; Acc,norm) | 48.01 | 25.88 | 59.46 | 68.56 | 68.44
PIQA (0-shot; Acc,norm) | 70.02 | 50.49 | 72.36 | 75.30 | 77.09
WinoGrande (0-shot; Acc) | 54.46 | 51.54 | 60.14 | 60.93 | 71.90
CommonsenseQA (10-shot; Acc) | 18.43 | 19.49 | 67.08 | 28.50 | 71.58
TruthfulQA (10-shot; MC2) | 40.65 | 49.05 | 43.29 | 39.13 | 45.31
TriviaQA (5-shot; EM) | 10.84 | 0.00 | 0.00 | 19.82 | 33.57
MMLU (5-shot; Acc) | 25.74 | 25.47 | 42.79 | 35.04 | 53.17
Average | 41.06 | 32.22 | 50.76 | 49.75 | 60.68
", + "bbox": [ + 192, + 354, + 802, + 712 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3: Performance comparison of BitNet b1.58 2B4T against other open-weight 1-bit models. This includes natively trained 1-bit models (Bonsai-0.5B, OLMo-Bitnet-1B) and larger models posttraining quantized to 1.58-bit (Falcon3-1.58bit-7B, Llama3-8B-1.58).", + "bbox": [ + 169, + 715, + 828, + 760 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3 Comparison with Open-weight 1-bit Models", + "text_level": 1, + "bbox": [ + 169, + 794, + 521, + 810 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Finally, we situate BitNet b1.58 2B4T within the landscape of other models designed for or quantized to near 1-bit precision. We compare it against natively trained 1-bit models of smaller scale and significantly larger models post-training quantized to 1.58-bit precision. The comparative results are presented in Table 3.", + "bbox": [ + 169, + 820, + 823, + 878 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "The evaluation clearly positions BitNet b1.58 2B4T as the leading model in this category. It demonstrates significantly stronger overall performance than all other compared 1-bit models, achieving", + "bbox": [ + 169, + 883, + 828, + 912 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "the highest scores on the vast majority of benchmarks. Notably, BitNet b1.58 2B4T substantially outperforms not only the smaller, natively trained 1-bit models but also the much larger models (in terms of parameter count) that were quantized to 1-bit. This highlights the effectiveness of the native training approach employed by BitNet b1.58 2B4T, allowing it to set a new state-of-the-art performance level for models operating at this extreme level of quantization, even surpassing larger models subjected to post-training quantization.", + "bbox": [ + 169, + 90, + 823, + 175 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5 Inference Implementation", + "text_level": 1, + "bbox": [ + 171, + 205, + 421, + 222 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Efficient inference is crucial for deploying Large Language Models, particularly for resource-constrained environments. The unique quantization scheme of BitNet b1.58 2B4T, employing 1.58-bit weights and 8-bit activations (W1.58A8), necessitates specialized implementations, as standard deep learning libraries often lack optimized kernels for such mixed-precision, low-bit formats. To address this, we developed and open-sourced dedicated inference libraries for both GPU and CPU platforms. The code is publicly available at https://aka.ms/bitnet.", + "bbox": [ + 169, + 243, + 826, + 327 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.1 GPU Inference", + "text_level": 1, + "bbox": [ + 171, + 354, + 316, + 368 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Current GPU architectures and their associated software libraries (e.g., cuBLAS, PyTorch kernels) are primarily optimized for operations involving standard data types like FP16, BF16, and INT8/INT4. Native, high-performance support for the specific W1.58A8 matrix multiplication required by BitNet b1.58 2B4T is generally unavailable. 
This limitation can hinder the realization of the theoretical efficiency gains offered by 1-bit models on existing hardware.", + "bbox": [ + 169, + 383, + 823, + 454 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To enable efficient GPU inference, we developed a custom CUDA kernel specifically designed for the W1.58A8 matrix multiplication. Since ternary weights ($\\{-1, 0, +1\\}$, representing 1.58 bits) cannot be stored efficiently using standard data types, we pack multiple weight values into a single 8-bit integer ('int8') for storage in High Bandwidth Memory (HBM). Specifically, four ternary values are encoded into one 'int8' value. During computation, the CUDA kernel loads the packed 'int8' weights from HBM into the GPU's faster on-chip Shared Memory (SRAM). It then unpacks these values back into a representation suitable for efficient ternary computation (e.g., reconstructing the -1, 0, +1 values) immediately before performing the matrix multiplication with the 8-bit activations. This 'pack-store-load-unpack-compute' strategy minimizes memory bandwidth usage while leveraging custom compute instructions. Further implementation details and optimization strategies are elaborated in the Ladder framework (Wang et al., 2023b).", + "bbox": [ + 169, + 459, + 825, + 612 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "While our custom kernel significantly improves performance compared to naive implementations, we note that current commodity GPU architectures are not optimally designed for 1-bit models. We believe that future hardware innovations, potentially incorporating dedicated logic for low-bit operations, will be essential to fully unlock the performance and energy efficiency potential of models like BitNet b1.58.", + "bbox": [ + 169, + 618, + 823, + 686 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.2 CPU Inference", + "text_level": 1, + "bbox": [ + 171, + 715, + 316, + 729 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To ensure broad accessibility and enable deployment on devices lacking powerful GPUs (e.g., edge devices, laptops, standard servers), we developed bitnet.cpp. This C++ library serves as an official reference implementation for CPU inference of 1-bit LLMs, including BitNet b1.58.", + "bbox": [ + 169, + 744, + 823, + 787 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "bitnet.cpp provides optimized kernels tailored for efficient execution on standard CPU architectures. The kernels are designed to operate efficiently with the model's specific quantization scheme, avoiding the overhead of generic quantization libraries or intricate low-level bit manipulation where possible. It processes the weight elements in a manner consistent with the BitNet b1.58 training methodology, ensuring numerical accuracy (lossless inference relative to the training procedure).", + "bbox": [ + 169, + 792, + 823, + 863 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "This approach delivers fast and accurate inference of 1.58-bit models directly on CPUs. 
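", + "page_idx": 7 + }, + { + "type": "text", + "text": "The 'pack-store-load-unpack-compute' strategy of Section 5.1 rests on a 2-bit encoding of ternary values, four per byte. The NumPy sketch below illustrates one possible layout; the actual bit ordering used by the CUDA kernels and by bitnet.cpp may differ.", + "page_idx": 7 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "import numpy as np\n\ndef pack_ternary(w):\n    # w: int8 array of values in {-1, 0, +1}, length divisible by 4.\n    u = (w + 1).astype(np.uint8).reshape(-1, 4)  # map {-1, 0, +1} to {0, 1, 2}\n    return u[:, 0] | (u[:, 1] << 2) | (u[:, 2] << 4) | (u[:, 3] << 6)\n\ndef unpack_ternary(packed):\n    out = np.empty((packed.size, 4), dtype=np.int8)\n    for i in range(4):  # one 2-bit field per weight\n        out[:, i] = ((packed >> (2 * i)) & 0b11).astype(np.int8) - 1\n    return out.reshape(-1)\n\nw = np.array([-1, 0, 1, 1, 0, 0, -1, 1], dtype=np.int8)\nassert (unpack_ternary(pack_ternary(w)) == w).all()", + "guess_lang": "python", + "page_idx": 7 + }, + { + "type": "text", + "text": "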
More technical details and usage instructions can be found in the bitnet.cpp repository and associated technical report (Wang et al., 2025).", + "bbox": [ + 169, + 869, + 823, + 912 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 171, + 90, + 302, + 107 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "This technical report introduced BitNet b1.58 2B4T, a significant step towards highly efficient yet capable Large Language Models. As the first open-source, native 1-bit LLM trained at the 2-billion parameter scale on 4 trillion tokens, our work demonstrates the viability of extreme quantization directly within the training process.", + "bbox": [ + 169, + 122, + 823, + 178 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Comprehensive evaluations across benchmarks assessing language understanding, reasoning, mathematics, coding, and dialogue revealed that BitNet b1.58 2B4T achieves performance comparable to state-of-the-art open-weight, full-precision models of similar size. Crucially, this performance parity is achieved with dramatically reduced computational requirements, offering substantial savings in memory footprint, energy consumption, and inference latency. To facilitate practical use and further research, we developed and released optimized inference implementations for both GPU (via custom CUDA kernels) and CPU (via the 'bitnet.cpp' library), alongside the model weights available on Hugging Face.", + "bbox": [ + 169, + 184, + 826, + 297 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "BitNet b1.58 2B4T represents a compelling proof-of-concept that challenges the necessity of full-precision weights for achieving high performance in LLMs at scale. It opens avenues for deploying powerful language models in resource-constrained environments where previous models were prohibitive, potentially democratizing access to advanced AI capabilities.", + "bbox": [ + 169, + 301, + 826, + 358 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "7 Future Directions", + "text_level": 1, + "bbox": [ + 171, + 378, + 354, + 396 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "While BitNet b1.58 2B4T demonstrates promising results, several exciting research directions remain:", + "bbox": [ + 169, + 412, + 826, + 429 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Scaling Laws and Larger Models: Investigating the scaling properties of native 1-bit LLMs is crucial. Future work will explore training larger models (e.g., 7B, 13B parameters and beyond) and training on even larger datasets to understand if the performance parity with full-precision models holds.", + "- Hardware Co-Design and Optimization: The full potential of 1-bit models is likely hindered by current hardware limitations. Continued development of highly optimized kernels for existing hardware (GPUs, CPUs, NPUs) is needed. Furthermore, co-designing future hardware accelerators specifically optimized for 1-bit computations and data movement could unlock orders-of-magnitude improvements in speed and energy efficiency.", + "- Extended Sequence Length: Extending the maximum sequence length that BitNet b1.58 2B4T can process is crucial. 
This enhancement is vital for tasks demanding long-context understanding, such as summarizing lengthy documents or engaging in complex problem-solving, and is particularly critical for improving performance on long chain-of-thought reasoning tasks. Investigating efficient attention mechanisms suitable for low-bit models at longer sequence lengths will be key.", + "- Multilingual Capabilities: The current model is primarily trained on English-centric data. Extending the pre-training corpus and potentially adapting the architecture to effectively support multiple languages is a key direction for broader applicability.", + "- Multimodal Integration: Exploring the integration of 1-bit principles into multimodal architectures is another promising frontier. Developing efficient ways to process and fuse information from different modalities (e.g., text and images) within a low-bit framework could enable new applications.", + "- Theoretical Understanding: Delving deeper into the theoretical underpinnings of why 1-bit training at scale is effective remains an open area. Analyzing the learning dynamics, loss landscapes, and representational properties of these models could yield valuable insights for future development." + ], + "bbox": [ + 215, + 441, + 826, + 840 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "By pursuing these directions, we aim to further advance the capability and efficiency of 1-bit LLMs, paving the way for more sustainable and accessible artificial intelligence. The open-source release of BitNet b1.58 2B4T and its associated tools provides a foundation for the community to build upon these efforts.", + "bbox": [ + 169, + 854, + 826, + 912 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 173, + 89, + 269, + 106 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Allal, L. B., Lozhkov, A., Bakouch, E., Blázquez, G. M., Penedo, G., Tunstall, L., Marafioti, A., Kydlíček, H., Lajarín, A. P., Srivastav, V., Lochner, J., Fahlgren, C., Nguyen, X.-S., Fourrier, C., Burtenshaw, B., Larcher, H., Zhao, H., Zakka, C., Morlon, M., Raffel, C., von Werra, L., and Wolf, T. (2025). Smollm2: When smol goes big - data-centric training of a small language model. CoRR, abs/2502.02737.", + "Bai, S., Chen, K., Liu, X., Wang, J., Ge, W., Song, S., Dang, K., Wang, P., Wang, S., Tang, J., Zhong, H., Zhu, Y., Yang, M.-H., Li, Z., Wan, J., Wang, P., Ding, W., Fu, Z., Xu, Y., Ye, J., Zhang, X., Xie, T., Cheng, Z., Zhang, H., Yang, Z., Xu, H., and Lin, J. (2025). Qwen2.5-vl technical report. CoRR, abs/2502.13923.", + "Bisk, Y., Zellers, R., Bras, R. L., Gao, J., and Choi, Y. (2019). PIQA: reasoning about physical commonsense in natural language. CoRR, abs/1911.11641.", + "Clark, C., Lee, K., Chang, M.-W., Kwiatkowski, T., Collins, M., and Toutanova, K. (2019). Boolq: Exploring the surprising difficulty of natural yes/no questions. CoRR, abs/1905.10044.", + "Cobbe, K., Kosaraju, V., Bavarian, M., Chen, M., Jun, H., Kaiser, L., Plappert, M., Tworek, J., Hilton, J., Nakano, R., Hesse, C., and Schulman, J. (2021). Training verifiers to solve math word problems. CoRR, abs/2110.14168.", + "Cui, G., Yuan, L., Ding, N., Yao, G., He, B., Zhu, W., Ni, Y., Xie, G., Xie, R., Lin, Y., Liu, Z., and Sun, M. (2024). ULTRAFEEDBACK: boosting language models with scaled AI feedback. In ICML. 
OpenReview.net.", + "Dubey, A., Jauhri, A., Pandey, A., Kadian, A., Al-Dahle, A., Letman, A., Mathur, A., Schelten, A., Yang, A., Fan, A., Goyal, A., Hartshorn, A., Yang, A., Mitra, A., Sravankumar, A., Korenev, A., Hinsvark, A., Rao, A., Zhang, A., Rodriguez, A., Gregerson, A., Spataru, A., Rozière, B., Biron, B., Tang, B., Chern, B., Caucheteux, C., Nayak, C., Bi, C., Marra, C., McConnell, C., Keller, C., Touret, C., Wu, C., Wong, C., Ferrer, C. C., Nikolaidis, C., Allonsius, D., Song, D., Pintz, D., Livshits, D., Esiobu, D., Choudhary, D., Mahajan, D., Garcia-Olano, D., Perino, D., Hupkes, D., Lakomkin, E., AlBadawy, E., Lobanova, E., Dinan, E., Smith, E. M., Radenovic, F., Zhang, F., Synnaeve, G., Lee, G., Anderson, G. L., Nail, G., Mialon, G., Pang, G., Cucurell, G., Nguyen, H., Korevaar, H., Xu, H., Touvron, H., Zarov, I., Ibarra, I. A., Kloumann, I. M., Misra, I., Evtimov, I., Copet, J., Lee, J., Geffert, J., Vranes, J., Park, J., Mahadeokar, J., Shah, J., van der Linde, J., Billock, J., Hong, J., Lee, J., Fu, J., Chi, J., Huang, J., Liu, J., Wang, J., Yu, J., Bitton, J., Spisak, J., Park, J., Rocca, J., Johnstun, J., Saxe, J., Jia, J., Alwala, K. V., Upasani, K., Plawiak, K., Li, K., Heafield, K., Stone, K., and et al. (2024). The llama 3 herd of models. CoRR, abs/2407.21783.", + "Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D., and Steinhardt, J. (2021a). Measuring massive multitask language understanding. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021.", + "Hendrycks, D., Burns, C., Kadavath, S., Arora, A., Basart, S., Tang, E., Song, D., and Steinhardt, J. (2021b). Measuring mathematical problem solving with the MATH dataset. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual.", + "Horowitz, M. (2014). 1.1 computing's energy problem (and what we can do about it). In 2014 IEEE International Conference on Solid-State Circuits Conference, ISSCC 2014, Digest of Technical Papers, San Francisco, CA, USA, February 9-13, 2014, pages 10-14.", + "Hsu, P.-L., Dai, Y., Kothapalli, V., Song, Q., Tang, S., Zhu, S., Shimizu, S., Sahni, S., Ning, H., and Chen, Y. (2024). Liger kernel: Efficient triton kernels for LLM training. CoRR, abs/2410.10989.", + "Hu, S., Tu, Y., Han, X., He, C., Cui, G., Long, X., Zheng, Z., Fang, Y., Huang, Y., Zhao, W., Zhang, X., Thai, Z. L., Zhang, K., Wang, C., Yao, Y., Zhao, C., Zhou, J., Cai, J., Zhai, Z., Ding, N., Jia, C., Zeng, G., Li, D., Liu, Z., and Sun, M. (2024). Minicpm: Unveiling the potential of small language models with scalable training strategies. CoRR, abs/2404.06395." + ], + "bbox": [ + 173, + 114, + 828, + 911 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 509, + 946 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Joshi, M., Choi, E., Weld, D. S., and Zettlemoyer, L. (2017). Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics, ACL 2017, Vancouver, Canada, July 30 - August 4, Volume 1: Long Papers, pages 1601-1611.", + "Li, H., Dong, Q., Tang, Z., Wang, C., Zhang, X., Huang, H., Huang, S., Huang, X., Huang, Z., Zhang, D., Gu, Y., Cheng, X., Wang, X., Chen, S.-Q., Dong, L., Lu, W., Sui, Z., Wang, B., Lam, W., and Wei, F. (2024a). 
Synthetic data (almost) from scratch: Generalized instruction tuning for language models. CoRR, abs/2402.13064.", + "Li, J., Fang, A., Smyrnis, G., Ivgi, M., Jordan, M., Gadre, S. Y., Bansal, H., Guha, E., Keh, S. S., Arora, K., Garg, S., Xin, R., Muennighoff, N., Heckel, R., Mercat, J., Chen, M. F., Gururangan, S., Wortsman, M., Albalak, A., Bitton, Y., Nezhurina, M., Abbas, A., Hsieh, C.-Y., Ghosh, D., Gardner, J., Kilian, M., Zhang, H., Shao, R., Pratt, S. M., Sanyal, S., Ilharco, G., Daras, G., Marathe, K., Gokaslan, A., Zhang, J., Chandu, K. R., Nguyen, T., Vasiljevic, I., Kakade, S. M., Song, S., Sanghavi, S., Faghri, F., Oh, S., Zettlemoyer, L., Lo, K., El-Nouby, A., Pouransari, H., Toshev, A., Wang, S., Groeneveld, D., Soldaini, L., Koh, P. W., Jitsev, J., Kollar, T., Dimakis, A., Carmon, Y., Dave, A., Schmidt, L., and Shankar, V. (2024b). Datacomp-lm: In search of the next generation of training sets for language models. In Globersons, A., Mackey, L., Belgrave, D., Fan, A., Paquet, U., Tomczak, J. M., and Zhang, C., editors, Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024.", + "Lian, W., Wang, G., Goodson, B., Pentland, E., Cook, A., Vong, C., and \"Teknium\" (2023). Slimorca: An open dataset of gpt-4 augmented flan reasoning traces, with verification.", + "Lin, S., Hilton, J., and Evans, O. (2022). Truthfulqa: Measuring how models mimic human falsehoods. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2022, Dublin, Ireland, May 22-27, 2022, pages 3214-3252.", + "Liu, J., Xia, C. S., Wang, Y., and Zhang, L. (2023). Is your code generated by chatgpt really correct? rigorous evaluation of large language models for code generation. Advances in Neural Information Processing Systems, 36:21558-21572.", + "Ma, S., Wang, H., Ma, L., Wang, L., Wang, W., Huang, S., Dong, L., Wang, R., Xue, J., and Wei, F. (2024). The era of 1-bit llms: All large language models are in 1.58 bits. CoRR, abs/2402.17764.", + "Mihaylov, T., Clark, P., Khot, T., and Sabharwal, A. (2018). Can a suit of armor conduct electricity? A new dataset for open book question answering. CoRR, abs/1809.02789.", + "Penedo, G., Kydlícek, H., Allal, L. B., Lozhkov, A., Mitchell, M., Raffel, C. A., von Werra, L., and Wolf, T. (2024). The fineweb datasets: Decanting the web for the finest text data at scale. In Globersons, A., Mackey, L., Belgrave, D., Fan, A., Paquet, U., Tomczak, J. M., and Zhang, C., editors, Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024.", + "Rafailov, R., Sharma, A., Mitchell, E., Manning, C. D., Ermon, S., and Finn, C. (2023). Direct preference optimization: Your language model is secretly a reward model. In Oh, A., Naumann, T., Globerson, A., Saenko, K., Hardt, M., and Levine, S., editors, Advances in Neural Information Processing Systems 36.", + "Sakaguchi, K., Bras, R. L., Bhagavatula, C., and Choi, Y. (2020). WinoGrande: an adversarial winograd schema challenge at scale. In AAAI, pages 8732-8740.", + "Schulman, J., Wolski, F., Dhariwal, P., Radford, A., and Klimov, O. (2017). Proximal policy optimization algorithms. CoRR, abs/1707.06347.", + "Shao, Z., Wang, P., Zhu, Q., Xu, R., Song, J., Zhang, M., Li, Y. K., Wu, Y., and Guo, D. (2024). 
Deepseekmath: Pushing the limits of mathematical reasoning in open language models. CoRR, abs/2402.03300.", + "Shazeer, N. (2020). GLU variants improve transformer. CoRR, abs/2002.05202." + ], + "bbox": [ + 171, + 90, + 826, + 912 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Su, J., Ahmed, M. H. M., Lu, Y., Pan, S., Bo, W., and Liu, Y. (2024). Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063.", + "Talmor, A., Herzig, J., Lourie, N., and Berant, J. (2019). Commonsenseqa: A question answering challenge targeting commonsense knowledge. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2019, Minneapolis, MN, USA, June 2-7, 2019, Volume 1 (Long and Short Papers), pages 4149-4158.", + "Tang, Z., Zhang, X., Wang, B., and Wei, F. (2024). Mathscale: Scaling instruction tuning for mathematical reasoning. In Forty-first International Conference on Machine Learning, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net.", + "Team, F.-L. (2024). The falcon 3 family of open models.", + "Team, G., Kamath, A., Ferret, J., Pathak, S., Vieillard, N., Merhej, R., Perrin, S., Matejovicova, T., Ramé, A., Rivière, M., et al. (2025). Gemma 3 technical report. arXiv preprint arXiv:2503.19786.", + "Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L., and Polosukhin, I. (2017). Attention is all you need. In Guyon, I., von Luxburg, U., Bengio, S., Wallach, H. M., Fergus, R., Vishwanathan, S. V. N., and Garnett, R., editors, Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 5998-6008.", + "Wang, H., Ma, S., Dong, L., Huang, S., Wang, H., Ma, L., Yang, F., Wang, R., Wu, Y., and Wei, F. (2023a). Bitnet: Scaling 1-bit transformers for large language models. CoRR, abs/2310.11453.", + "Wang, H., Ma, S., Huang, S., Dong, L., Wang, W., Peng, Z., Wu, Y., Bajaj, P., Singhal, S., Benhaim, A., Patra, B., Liu, Z., Chaudhary, V., Song, X., and Wei, F. (2022). Foundation transformers. CoRR.", + "Wang, H., Ma, S., Wang, R., and Wei, F. (2024a). Q-sparse: All large language models can be fully sparsely-activated. CoRR, abs/2407.10969.", + "Wang, H., Ma, S., and Wei, F. (2024b). Bitnet a4.8: 4-bit activations for 1-bit llms. CoRR, abs/2411.04965.", + "Wang, J., Zhou, H., Song, T., Cao, S., Xia, Y., Cao, T., Wei, J., Ma, S., Wang, H., and Wei, F. (2025). Bitnet.cpp: Efficient edge inference for ternary llms. CoRR, abs/2502.11880.", + "Wang, L., Ma, L., Cao, S., Zheng, N., Zhang, Q., Xue, J., Miao, Z., Cao, T., and Yang, Y. (2023b). Ladder: Efficient tensor compilation on customized data format. In OSDI.", + "Xu, C., Sun, Q., Zheng, K., Geng, X., Zhao, P., Feng, J., Tao, C., Lin, Q., and Jiang, D. (2024a). Wizardlm: Empowering large pre-trained language models to follow complex instructions. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net.", + "Xu, Y., Han, X., Yang, Z., Wang, S., Zhu, Q., Liu, Z., Liu, W., and Che, W. (2024b). Onebit: Towards extremely low-bit large language models. In Globersons, A., Mackey, L., Belgrave, D., Fan, A., Paquet, U., Tomczak, J. 
M., and Zhang, C., editors, Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024.", + "Xu, Z., Jiang, F., Niu, L., Deng, Y., Poovendran, R., Choi, Y., and Lin, B. Y. (2024c). Magpie: Alignment data synthesis from scratch by prompting aligned llms with nothing. CoRR, abs/2406.08464.", + "Yadav, V., Bethard, S., and Surdeanu, M. (2019). Quick and (not so) dirty: Unsupervised selection of justification sentences for multi-hop question answering. In EMNLP-IJCNLP.", + "Yang, A., Yang, B., Zhang, B., Hui, B., Zheng, B., Yu, B., Li, C., Liu, D., Huang, F., Wei, H., Lin, H., Yang, J., Tu, J., Zhang, J., Yang, J., Yang, J., Zhou, J., Lin, J., Dang, K., Lu, K., Bao, K., Yang, K., Yu, L., Li, M., Xue, M., Zhang, P., Zhu, Q., Men, R., Lin, R., Li, T., Xia, T., Ren, X., Ren, X., Fan, Y., Su, Y., Zhang, Y., Wan, Y., Liu, Y., Cui, Z., Zhang, Z., and Qiu, Z. (2024). Qwen2.5 technical report. CoRR, abs/2412.15115." + ], + "bbox": [ + 173, + 90, + 828, + 911 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Zellers, R., Holtzman, A., Bisk, Y., Farhadi, A., and Choi, Y. (2019). HellaSwag: can a machine really finish your sentence? In Proceedings of the 57th Conference of the Association for Computational Linguistics, pages 4791-4800.", + "Zhang, Y., Zhang, Z., and Lew, L. (2022). PokeBNN: A binary pursuit of lightweight accuracy. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12465-12475. IEEE.", + "Zhao, W., Ren, X., Hessel, J., Cardie, C., Choi, Y., and Deng, Y. (2024). Wildchat: 1m chatgpt interaction logs in the wild. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net.", + "Zheng, L., Chiang, W.-L., Sheng, Y., Li, T., Zhuang, S., Wu, Z., Zhuang, Y., Li, Z., Lin, Z., Xing, E. P., Gonzalez, J. E., Stoica, I., and Zhang, H. (2024). Lmsys-chat-1m: A large-scale real-world LLM conversation dataset. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net.", + "Zheng, L., Chiang, W.-L., Sheng, Y., Zhuang, S., Wu, Z., Zhuang, Y., Lin, Z., Li, Z., Li, D., Xing, E. P., Zhang, H., Gonzalez, J. E., and Stoica, I. (2023). Judging llm-as-a-judge with mt-bench and chatbot arena. In Advances in Neural Information Processing Systems 36.", + "Zhou, J., Lu, T., Mishra, S., Brahma, S., Basu, S., Luan, Y., Zhou, D., and Hou, L. (2023). Instruction-following evaluation for large language models. CoRR, abs/2311.07911." 
+ ], + "bbox": [ + 171, + 90, + 826, + 380 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A Open-weight Baselines", + "text_level": 1, + "bbox": [ + 171, + 405, + 401, + 422 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We summarize the links to the open-weight LLMs evaluated in this work as below:", + "bbox": [ + 171, + 436, + 718, + 452 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- LLaMA 3.2 1B: meta-llama/Llama-3.2-1B-Instruct", + "- Gemma-3 1B: google/gemma-3-1b-it", + "Qwen2.5 0.5B: Qwen/Qwen2.5-0.5B-Instruct", + "- Qwen2.5 1.5B: Qwen/Qwen2.5-1.5B-Instruct", + "- Qwen2.5 3B: Qwen/Qwen2.5-3B-Instruct", + "- SmolLM2 1.7B: HuggingFaceTB/SmolLM2-1.7B-Instruct", + "- MiniCPM 2B: openbmb/MiniCPM-2B-dpo-bf16", + "- Qwen2.5 1.5B-GPTQ-int4: Qwen/Qwen2.5-1.5B-Instruct-GPTQ-Int4", + "Qwen2.5 1.5B-AWQ-int4: Qwen/Qwen2.5-1.5B-Instruct-AWQ", + "- Bonsai 0.5B: deepgrove/Bonsai", + "- OLMo-Bitnet 1B: NousResearch/OLMo-Bitnet-1B", + "- Falcon3-1.58bit 7B: tiiuae/Falcon3-7B-Instruct-1.58bit", + "- Llama3-8B-1.58 8B: HF1BitLLM/Llama3-8B-1.58-100B-tokens" + ], + "bbox": [ + 215, + 464, + 730, + 707 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B Evaluation Pipeline Details", + "text_level": 1, + "bbox": [ + 171, + 727, + 434, + 744 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To ensure standardized evaluation, we employed established toolkits for different benchmark categories. Specifically:", + "bbox": [ + 169, + 758, + 826, + 787 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- For the HumanEval+ coding benchmark, we utilized the evalplus toolkit.", + "- For the MATH-500 mathematical reasoning benchmark, we used a customized version of the math-evaluation-harness toolkit.", + "- For the MT-Bench conversational benchmark, evaluation was performed using the official LLM Judge open-source codebase.", + "- For all other benchmarks assessing language understanding, reasoning, knowledge, and comprehension, we used the standard lm-evaluation-harness framework." + ], + "bbox": [ + 215, + 797, + 825, + 911 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/c2ea347c586a5437a02e09c5396b1bc21f19fa3a3f5ae4fc75ee151f66b801d8.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Bits | ADD Energy | MUL Energy
FP16 | 0.16 | 0.34
INT8 | 0.007 | 0.07
", + "bbox": [ + 362, + 88, + 633, + 147 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 4: ADD and MUL energy consumption (in pJ) of different precision at $7\\mathrm{nm}$ process nodes.", + "bbox": [ + 181, + 152, + 815, + 167 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Models were prompted using a chat format for generative tasks (e.g., GSM8K, IFEval, and MT-Bench), while default settings from the respective toolkits were used for other tasks.", + "bbox": [ + 174, + 193, + 823, + 220 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "For energy consumption, we utilize the energy model in (Horowitz, 2014; Zhang et al., 2022) to estimate the arithmetic operations energy (AOE) of matrix multiplication. The sequence length is set as 512 tokens. We present the energy consumption for ADD and MUL operation at $7\\mathrm{nm}$ process nodes in Table 4.", + "bbox": [ + 174, + 227, + 823, + 282 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "To assess CPU decoding performance, latency measurements were conducted on a Surface Laptop Studio 2 system powered by a 13th Gen Intel Core i7-13800H processor. The benchmarking process utilized 8 CPU threads. Specifically, the BitNet b1.58 2B4T model was tested using its bitnet.cpp implementation, whereas other models were evaluated using the llama.cpp framework. For each model, we generated 128 tokens and report the average latency per token for this task.", + "bbox": [ + 174, + 289, + 823, + 359 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 491, + 935, + 506, + 946 + ], + "page_idx": 13 + } +] \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12285/2c3f7ef8-ab61-4b87-a7bf-c49da203744d_model.json b/data/2025/2504_12xxx/2504.12285/2c3f7ef8-ab61-4b87-a7bf-c49da203744d_model.json new file mode 100644 index 0000000000000000000000000000000000000000..a0185b003cedbd08d4541237075c9cfe40c7e98d --- /dev/null +++ b/data/2025/2504_12xxx/2504.12285/2c3f7ef8-ab61-4b87-a7bf-c49da203744d_model.json @@ -0,0 +1,2384 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.279, + 0.123, + 0.721, + 0.149 + ], + "angle": 0, + "content": "BitNet b1.58 2B4T Technical Report" + }, + { + "type": "header", + "bbox": [ + 0.267, + 0.201, + 0.735, + 0.244 + ], + "angle": 0, + "content": "Shuming Ma* Hongyu Wang* Shaohan Huang Xingxing Zhang Ying Hu Ting Song Yan Xia Furu Wei https://aka.ms/GeneralAI" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.251, + 0.538, + 0.266 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.28, + 0.77, + 0.435 + ], + "angle": 0, + "content": "We introduce BitNet b1.58 2B4T, the first open-source, native 1-bit Large Language Model (LLM) at the 2-billion parameter scale. Trained on a corpus of 4 trillion tokens, the model has been rigorously evaluated across benchmarks covering language understanding, mathematical reasoning, coding proficiency, and conversational ability. Our results demonstrate that BitNet b1.58 2B4T achieves performance on par with leading open-weight, full-precision LLMs of similar size, while offering significant advantages in computational efficiency, including substantially reduced memory footprint, energy consumption, and decoding latency. To facilitate further research and adoption, the model weights are released via Hugging Face along with open-source inference implementations for both GPU and CPU architectures." 
+ }, + { + "type": "text", + "bbox": [ + 0.235, + 0.442, + 0.581, + 0.455 + ], + "angle": 0, + "content": "BitNet b1.58 2B4T (1.58-bit): bitnet-b1.58-2B-4T" + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.456, + 0.64, + 0.469 + ], + "angle": 0, + "content": "The packed weight of BitNet b1.58 2B4T, used for inference only" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.474, + 0.6, + 0.488 + ], + "angle": 0, + "content": "BitNet b1.58 2B4T (bf16): bitnet-b1.58-2B-4T-bf16" + }, + { + "type": "text", + "bbox": [ + 0.256, + 0.488, + 0.632, + 0.502 + ], + "angle": 0, + "content": "The master weight of BitNet b1.58 2B4T, used for training only" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.506, + 0.599, + 0.52 + ], + "angle": 0, + "content": "BitNet b1.58 2B4T (gguf): bitnet-b1.58-2B-4T-gguf" + }, + { + "type": "text", + "bbox": [ + 0.256, + 0.52, + 0.615, + 0.535 + ], + "angle": 0, + "content": "The GGUF format of BitNet b1.58 2B4T, used for bitnet.cpp" + }, + { + "type": "text", + "bbox": [ + 0.235, + 0.54, + 0.711, + 0.555 + ], + "angle": 0, + "content": "BitNet b1.58 2B4T Code: bitnet.cpp Demo: aka.ms/bitnet-demo" + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.563, + 0.771, + 0.825 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.83, + 0.825, + 0.861 + ], + "angle": 0, + "content": "Figure 1: BitNet b1.58 2B4T advances the Pareto frontier defined by leading open-weight LLMs under 3B parameters in terms of performance versus memory, demonstrating superior efficiency." + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.873, + 0.825, + 0.914 + ], + "angle": 0, + "content": "* Equal contribution. ⋆ Corresponding author. S. Ma, S. Huang, X. Zhang, T. Song, Y. Xia and F. Wei are with Microsoft Research. H. Wang is with University of Chinese Academy of Sciences. Y. Hu is with Tsinghua University." + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.266, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.12285v2 [cs.CL] 25 Apr 2025" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.174, + 0.09, + 0.313, + 0.106 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.121, + 0.827, + 0.233 + ], + "angle": 0, + "content": "Open-source large language models (LLMs) have become pivotal in democratizing access to advanced AI capabilities, fostering innovation, and enabling research across diverse fields such as natural language processing, code generation, and vision computing (Dubey et al., 2024; Yang et al., 2024; Bai et al., 2025). Their public availability allows for widespread experimentation and adaptation. However, a significant barrier hinders their broader adoption: the substantial computational resources required for deployment and inference. State-of-the-art open LLMs typically require large memory footprints, consume considerable energy, and exhibit notable inference latency, rendering them impractical for many edge devices, resource-constrained environments, and real-time applications." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.238, + 0.828, + 0.392 + ], + "angle": 0, + "content": "1-bit LLMs, representing an extreme yet promising form of model quantization where weights and potentially activations are constrained to binary \\(\\{-1, +1\\}\\) or ternary \\(\\{-1, 0, +1\\}\\), offer a compelling solution to the efficiency challenges. 
By drastically reducing the memory required to store weights and enabling highly efficient bitwise computations, they have the potential to significantly lower deployment costs, reduce energy consumption, and accelerate inference speeds. While prior work has explored 1-bit models, existing open efforts often fall into two categories: 1) post-training quantization (PTQ) methods applied to pre-trained full-precision models, which can lead to significant performance degradation (Xu et al., 2024b; Team, 2024), or 2) native 1-bit models (trained from scratch with 1-bit weights) that have been developed at relatively smaller scales (e.g., OLMo-Bitnet-1B²]) and may not yet match the capabilities of larger, full-precision counterparts. This performance gap has limited the practical impact of 1-bit LLMs thus far." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.397, + 0.825, + 0.482 + ], + "angle": 0, + "content": "To bridge this gap between efficiency and performance, we introduce BitNet b1.58 2B4T, the first open-source, native 1-bit LLM trained at scale. This model, comprising 2 billion parameters, was trained from scratch on a substantial dataset of 4 trillion tokens, leveraging architectural and training innovations specific to the 1-bit paradigm. The core contribution of this work is to demonstrate that a native 1-bit LLM, when trained effectively at scale, can achieve performance comparable to leading open-weight, full-precision models of similar size across a wide range of tasks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.486, + 0.826, + 0.6 + ], + "angle": 0, + "content": "This technical report details the development and evaluation of BitNet b1.58 2B4T. We describe the architecture and training methodology, and then present comprehensive evaluation results on standard benchmarks assessing language understanding, mathematical reasoning, coding proficiency, and multi-turn conversational abilities. Our findings confirm its strong performance relative to established full-precision baselines, coupled with significant advantages in efficiency. Finally, we announce the public release of the BitNet b1.58 2B4T model weights via Hugging Face and provide open-source inference code optimized for both GPU and CPU execution, aiming to facilitate further research and the practical deployment of highly efficient LLMs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.618, + 0.314, + 0.634 + ], + "angle": 0, + "content": "2 Architecture" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.649, + 0.827, + 0.692 + ], + "angle": 0, + "content": "The architecture of BitNet b1.58 2B4T is derived from the standard Transformer model (Vaswani et al., 2017), incorporating significant modifications based on the BitNet framework (Wang et al., 2023a; Ma et al., 2024). The model is trained entirely from scratch." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.697, + 0.825, + 0.74 + ], + "angle": 0, + "content": "The core architectural innovation lies in replacing the standard full-precision linear layers (torch(nn.Linear) with custom BitLinear layers. This constitutes the foundation of the BitNet approach. Within these BitLinear layers:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.751, + 0.825, + 0.806 + ], + "angle": 0, + "content": "- Weight Quantization: Model weights are quantized to 1.58 bits during the forward pass. This is achieved using an absolute mean (absmean) quantization scheme, which maps weights to ternary values \\(\\{-1,0, + 1\\}\\). 
This drastically reduces the model size and enables efficient mathematical operations." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.812, + 0.825, + 0.853 + ], + "angle": 0, + "content": "- Activation Quantization: Activations flowing through the linear projection are quantized to 8-bit integers. This employs an absolute maximum (absmax) quantization strategy, applied per-token." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.858, + 0.825, + 0.886 + ], + "angle": 0, + "content": "- Normalization: We incorporate subln normalization (Wang et al., 2022) to further enhance training stability, which can be particularly beneficial in quantized training regimes." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.751, + 0.825, + 0.886 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.897, + 0.591, + 0.912 + ], + "angle": 0, + "content": "2https://huggingface.co/NousResearch/OLMo-Bitnet-1B" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.172, + 0.092, + 0.825, + 0.121 + ], + "angle": 0, + "content": "Beyond the BitLinear layers, several established LLM techniques are integrated to enhance performance and stability:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.132, + 0.822, + 0.185 + ], + "angle": 0, + "content": "- Activation Function (FFN): Within the feed-forward network (FFN) sub-layers, instead of the commonly used SwiGLU activation (Shazeer, 2020), BitNet b1.58 2B4T employs squared ReLU \\((\\mathrm{ReLU}^2)\\). This choice is motivated by its potential to improve model sparsity and computational characteristics within the 1-bit context (Wang et al., 2024b,a)." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.193, + 0.822, + 0.22 + ], + "angle": 0, + "content": "- **Positional Embeddings:** Rotary Position Embeddings (RoPE) (Su et al., 2024) are used to inject positional information, a standard practice in modern high-performance LLMs." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.227, + 0.822, + 0.267 + ], + "angle": 0, + "content": "- Bias Removal: Consistent with architectures like LLaMA, all bias terms are removed from the linear layers and normalization layers throughout the network, reducing parameter count and potentially simplifying quantization." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.132, + 0.822, + 0.267 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.281, + 0.825, + 0.337 + ], + "angle": 0, + "content": "For tokenization, we adopt the tokenizer developed for LLaMA 3 (Dubey et al., 2024). This tokenizer implements a byte-level Byte-Pair Encoding (BPE) scheme with a vocabulary size of 128,256 tokens. This choice ensures robust handling of diverse text and code, and its widespread adoption facilitates straightforward integration with existing open-source tooling and ecosystems." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.358, + 0.281, + 0.375 + ], + "angle": 0, + "content": "3 Training" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.39, + 0.825, + 0.485 + ], + "angle": 0, + "content": "The training process for BitNet b1.58 2B4T involved three distinct phases: large-scale pre-training followed by supervised fine-tuning (SFT) and direct preference optimization (DPO). 
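Before the training details, a minimal sketch of the BitLinear forward pass described in Section 2 may help: absmean ternarization for weights and per-token absmax INT8 quantization for activations (W1.58A8). This is a sketch under assumptions: the function names and the eps guard are ours, the released CUDA/bitnet.cpp kernels are the authoritative implementation, and during training a straight-through estimator would pass gradients through the rounding.

```python
import torch

def absmean_ternarize(w: torch.Tensor, eps: float = 1e-5):
    # absmean scheme: scale by the mean |w|, then round into {-1, 0, +1}
    scale = w.abs().mean().clamp(min=eps)
    w_q = (w / scale).round().clamp(-1, 1)
    return w_q, scale

def absmax_quantize_int8(x: torch.Tensor, eps: float = 1e-5):
    # per-token absmax: each token's activation row is mapped into [-127, 127]
    scale = x.abs().amax(dim=-1, keepdim=True).clamp(min=eps) / 127.0
    x_q = (x / scale).round().clamp(-128, 127)
    return x_q, scale

def bitlinear_forward(x: torch.Tensor, w: torch.Tensor) -> torch.Tensor:
    # W1.58A8 product; both scales are reapplied after the low-bit matmul
    w_q, w_scale = absmean_ternarize(w)
    x_q, x_scale = absmax_quantize_int8(x)
    return (x_q @ w_q.t()) * x_scale * w_scale
```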
While advanced techniques like Proximal Policy Optimization (PPO) or Group Relative Policy Optimization (GRPO) can further enhance capabilities such as mathematics and chain-of-thought reasoning (Schulman et al., 2017; Shao et al., 2024), the current version of BitNet b1.58 2B4T relies solely on pre-training, SFT, and DPO. The exploration of reinforcement learning methods remains a direction for future work." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.504, + 0.301, + 0.519 + ], + "angle": 0, + "content": "3.1 Pre-training" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.53, + 0.825, + 0.573 + ], + "angle": 0, + "content": "The pre-training phase aimed to imbue the model with broad world knowledge and foundational language capabilities. We adapted general training strategies from established LLM practices (Dubey et al., 2024), with specific adjustments tailored for the 1-bit architecture." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.588, + 0.394, + 0.603 + ], + "angle": 0, + "content": "3.1.1 Learning Rate Schedule" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.613, + 0.504, + 0.628 + ], + "angle": 0, + "content": "A two-stage learning rate schedule was employed." + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.639, + 0.822, + 0.692 + ], + "angle": 0, + "content": "1. **Stage 1 (High Learning Rate):** The initial phase utilized a standard cosine decay schedule but commenced with a relatively high peak learning rate. This decision was informed by the observation that 1-bit models often exhibit greater training stability compared to their full-precision counterparts, allowing for more aggressive initial learning steps." + }, + { + "type": "text", + "bbox": [ + 0.209, + 0.7, + 0.822, + 0.753 + ], + "angle": 0, + "content": "2. **Stage 2 (Cooldown):** Approximately midway through the planned training token count, the learning rate was abruptly decayed and subsequently maintained via a cosine schedule with a significantly lower peak value. This \"cooldown\" phase allows the model to refine its representations on higher-quality data (see Section 3.1.3)." + }, + { + "type": "list", + "bbox": [ + 0.209, + 0.639, + 0.822, + 0.753 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.772, + 0.389, + 0.786 + ], + "angle": 0, + "content": "3.1.2 Weight Decay Schedule" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.796, + 0.822, + 0.811 + ], + "angle": 0, + "content": "Complementing the learning rate adjustments, a two-stage weight decay strategy was implemented." + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.822, + 0.822, + 0.861 + ], + "angle": 0, + "content": "1. **Stage 1:** During the first training stage, weight decay followed a cosine schedule, reaching a peak value of 0.1. This regularization helps prevent overfitting during the initial high learning-rate phase." + }, + { + "type": "text", + "bbox": [ + 0.209, + 0.87, + 0.822, + 0.908 + ], + "angle": 0, + "content": "2. **Stage 2:** In the second stage, weight decay was effectively disabled (set to zero). This allows the model parameters to settle into finer-grained optima guided by the lower learning rate and curated data." 
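A toy rendering of the two-stage schedules just described follows. The peak values, the mid-training switch point, and the exact cosine shapes are placeholders, since the report does not publish these hyperparameters.

```python
import math

def two_stage_lr(step: int, total: int, peak_hi: float = 1.0,
                 peak_lo: float = 0.1, switch_frac: float = 0.5) -> float:
    """Stage 1: cosine decay from a high peak; stage 2: abrupt drop to a
    lower-peak cosine "cooldown" roughly midway through training."""
    switch = max(1, int(total * switch_frac))
    if step < switch:
        t = step / switch
        return 0.5 * peak_hi * (1.0 + math.cos(math.pi * t))
    t = (step - switch) / max(1, total - switch)
    return 0.5 * peak_lo * (1.0 + math.cos(math.pi * t))

def weight_decay(step: int, total: int, peak_wd: float = 0.1) -> float:
    """One plausible reading of the text: a cosine-shaped schedule reaching
    its peak of 0.1 during stage 1, then disabled (zero) in stage 2."""
    switch = max(1, total // 2)
    if step >= switch:
        return 0.0
    t = step / switch
    return peak_wd * math.sin(math.pi * t / 2) ** 2  # rises to peak_wd
```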
+ }, + { + "type": "list", + "bbox": [ + 0.209, + 0.822, + 0.822, + 0.908 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.092, + 0.35, + 0.107 + ], + "angle": 0, + "content": "3.1.3 Pre-training Data" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.115, + 0.825, + 0.2 + ], + "angle": 0, + "content": "The pre-training corpus comprised a mixture of publicly available text and code datasets, including large web crawls like DCLM (Li et al., 2024b) and educational web pages like FineWeb-EDU (Penedo et al., 2024). To enhance mathematical reasoning abilities, we also incorporated synthetically generated mathematical data. The data presentation strategy aligned with the two-stage training: the bulk of general web data was processed during Stage 1, while higher-quality curated datasets were emphasized during the Stage 2 cooldown phase, coinciding with the reduced learning rate." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.215, + 0.421, + 0.231 + ], + "angle": 0, + "content": "3.2 Supervised Fine-tuning (SFT)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.241, + 0.827, + 0.27 + ], + "angle": 0, + "content": "Following pre-training, the model underwent supervised fine-tuning (SFT) to enhance its instruction-following capabilities and improve its performance in conversational interaction formats." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.284, + 0.295, + 0.299 + ], + "angle": 0, + "content": "3.2.1 SFT Data" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.308, + 0.828, + 0.392 + ], + "angle": 0, + "content": "The SFT phase utilized a diverse collection of publicly available instruction-following and conversational datasets. These included, but were not limited to, WildChat (Zhao et al., 2024), LMSYS-Chat1M (Zheng et al., 2024), WizardLM Evol-Instruct (Xu et al., 2024a), and SlimOrca (Lian et al., 2023). To further bolster specific capabilities, particularly in reasoning and complex instruction adherence, we supplemented these with synthetic datasets generated using methodologies like GLAN (Li et al., 2024a) and MathScale (Tang et al., 2024)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.406, + 0.331, + 0.421 + ], + "angle": 0, + "content": "3.2.2 Chat Template" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.43, + 0.827, + 0.459 + ], + "angle": 0, + "content": "For conversational tasks during SFT and inference, the following chat template structure was employed:" + }, + { + "type": "code", + "bbox": [ + 0.171, + 0.469, + 0.613, + 0.54 + ], + "angle": 0, + "content": "<|begin_of_text|>System: {system_message}<|eot_id|>\nUser: {user_message_1}<|eot_id|\nAssistant: {assistant_message_1}<|eot_id|\nUser: {user_message_2}<|eot_id|\nAssistant: {assistant_message_2}<|eot_id|..." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.554, + 0.371, + 0.569 + ], + "angle": 0, + "content": "3.2.3 Optimization Details" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.578, + 0.51, + 0.593 + ], + "angle": 0, + "content": "Several optimization choices were key during SFT:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.604, + 0.825, + 0.647 + ], + "angle": 0, + "content": "- Loss Aggregation: Instead of averaging the cross-entropy loss across tokens within a batch (mean reduction), we employed summation. 
Empirically, we observed that summing the losses led to improved convergence and better final performance for this model." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.65, + 0.826, + 0.72 + ], + "angle": 0, + "content": "- Hyperparameter Tuning: Careful tuning of the learning rate and the number of training epochs was performed. Consistent with our pre-training findings, the 1-bit model benefited from a relatively larger learning rate during SFT compared to typical full-precision model fine-tuning. Furthermore, achieving optimal convergence required extending the fine-tuning duration over a larger number of epochs than full-precision models of similar size." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.604, + 0.826, + 0.72 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.736, + 0.482, + 0.752 + ], + "angle": 0, + "content": "3.3 Direct Preference Optimization (DPO)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.762, + 0.826, + 0.845 + ], + "angle": 0, + "content": "To further align the model's behavior with human preferences regarding helpfulness and safety, we applied Direct Preference Optimization (DPO) (Rafailov et al., 2023) following the SFT phase. DPO offers an efficient alternative to traditional RLHF by directly optimizing the language model using preference data, thereby circumventing the need to train a separate reward model. This DPO stage served to refine the model's conversational prowess and overall alignment with desired interaction patterns in practical use cases." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.86, + 0.325, + 0.875 + ], + "angle": 0, + "content": "3.3.1 Training Data" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.827, + 0.914 + ], + "angle": 0, + "content": "The preference dataset used for DPO training was constructed from a combination of publicly available resources recognized for capturing diverse human judgments on model outputs. Specifically," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.135 + ], + "angle": 0, + "content": "we utilized UltraFeedback (Cui et al., 2024) and MagPie (Xu et al., 2024c). The aggregation of these datasets provided a robust and multifaceted preference signal, guiding the model towards generating responses more aligned with human expectations." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.148, + 0.34, + 0.163 + ], + "angle": 0, + "content": "3.3.2 Training Details" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.171, + 0.827, + 0.256 + ], + "angle": 0, + "content": "The DPO training phase was conducted for 2 epochs. We employed a learning rate of \\(2 \\times 10^{-7}\\) and set the DPO beta parameter, which controls the divergence from the reference policy, to 0.1. To enhance training efficiency during this phase, we integrated optimized kernels from the Liger Kernel library (Hsu et al., 2024). Qualitatively, our observations indicate that the DPO process effectively steered the model towards preferred response styles without inducing significant degradation in the core capabilities established during pre-training and SFT." 
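Two of the choices above are compact enough to spell out: the summed (rather than mean-reduced) SFT cross-entropy, and the standard DPO objective (Rafailov et al., 2023) with the beta of 0.1 used here. This is a sketch: tensor shapes and helper names are ours, and in practice the log-probabilities would be computed over masked response tokens only.

```python
import torch
import torch.nn.functional as F

def sft_loss(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    """Summed token cross-entropy (reduction="sum"), per the loss-aggregation
    note above; the loss then scales with the number of target tokens."""
    return F.cross_entropy(
        logits.view(-1, logits.size(-1)),
        labels.view(-1),
        ignore_index=-100,
        reduction="sum",
    )

def dpo_loss(pi_chosen: torch.Tensor, pi_rejected: torch.Tensor,
             ref_chosen: torch.Tensor, ref_rejected: torch.Tensor,
             beta: float = 0.1) -> torch.Tensor:
    """DPO on per-example summed log-probs under the policy and the frozen
    reference model; beta controls divergence from the reference."""
    margin = (pi_chosen - ref_chosen) - (pi_rejected - ref_rejected)
    return -F.logsigmoid(beta * margin).mean()
```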
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.274, + 0.298, + 0.29 + ], + "angle": 0, + "content": "4 Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.304, + 0.7, + 0.32 + ], + "angle": 0, + "content": "We measure performance on a wide variety of benchmarks classified as follows:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.33, + 0.825, + 0.385 + ], + "angle": 0, + "content": "- Language understanding and reasoning: ARC-Easy (Yadav et al., 2019), ARC-Challenge (Yadav et al., 2019), HellaSwag (Zellers et al., 2019), WinoGrande (Sakaguchi et al., 2020), PIQA (Bisk et al., 2019), OpenbookQA (Mihaylov et al., 2018), and CommonsenseQA (Talmor et al., 2019)" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.389, + 0.81, + 0.404 + ], + "angle": 0, + "content": "- World knowledge: TruthfulQA (Lin et al., 2022) and MMLU (Hendrycks et al., 2021a)" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.407, + 0.804, + 0.423 + ], + "angle": 0, + "content": "- Reading comprehension: TriviaQA (Joshi et al., 2017) and BoolQ (Clark et al., 2019)" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.426, + 0.825, + 0.454 + ], + "angle": 0, + "content": "- Math and code: GSM8K (Cobbe et al., 2021), MATH-500 (Hendrycks et al., 2021b) and HumanEval+ (Liu et al., 2023)" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.458, + 0.825, + 0.485 + ], + "angle": 0, + "content": "- Instruction following and conversation: IFEval (Zhou et al., 2023) and MT-bench (Zheng et al., 2023)" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.33, + 0.825, + 0.485 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.497, + 0.827, + 0.581 + ], + "angle": 0, + "content": "We compare BitNet b1.58 2B4T with leading open-weight full precision LLMs of similar size, including LLaMA 3.2 1B (Dubey et al., 2024), Gemma-3 1B (Team et al., 2025), Qwen2.5 1.5B (Yang et al., 2024), SmolLM2 1.7B (Allal et al., 2025) and MiniCPM 2B (Hu et al., 2024). All models are instruction-tuned versions. We re-run all benchmarks with a public evaluation pipeline for a fair comparison. More evaluation details are available at the appendix. The main results are presented in Table 1." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.596, + 0.307, + 0.61 + ], + "angle": 0, + "content": "4.1 Main Results" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.621, + 0.827, + 0.691 + ], + "angle": 0, + "content": "As shown in Table 1, BitNet b1.58 2B4T demonstrates remarkable resource efficiency. Its non-embedding memory footprint and estimated energy consumption (Horowitz, 2014; Zhang et al., 2022) during decoding are substantially lower compared to all the full-precision models evaluated, highlighting a significant advantage in operational cost and deployability on resource-constrained devices." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.697, + 0.827, + 0.795 + ], + "angle": 0, + "content": "In terms of task performance, BitNet b1.58 2B4T proves highly competitive. It achieves the best results among the compared models on several benchmarks spanning reasoning, knowledge, and math capabilities. On other benchmarks, its performance is closely comparable to the top-performing full-precision models. While some full-precision models show slight advantages on specific tasks or the overall average, BitNet b1.58 2B4T delivers strong performance across the board. 
The results indicate that BitNet b1.58 2B4T achieves capabilities nearly on par with leading models in its size class while offering dramatically improved efficiency." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.81, + 0.563, + 0.826 + ], + "angle": 0, + "content": "4.2 Comparison with Post-training Quantized Models" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.835, + 0.826, + 0.879 + ], + "angle": 0, + "content": "We further investigate the efficiency-performance trade-off by comparing BitNet b1.58 2B4T against post-training quantized (PTQ) versions of a leading competitor, Qwen2.5 1.5B, using standard INT4 methods (GPTQ and AWQ). The results are summarized in Table 2." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.883, + 0.829, + 0.913 + ], + "angle": 0, + "content": "While INT4 quantization successfully reduces the memory footprint of the full-precision model, BitNet b1.58 2B4T achieves an even lower memory requirement due to its native 1-bit architecture." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.182, + 0.088, + 0.813, + 0.673 + ], + "angle": 0, + "content": "
Benchmark (Metric) | LLaMA 3.2 1B | Gemma-3 1B | Qwen2.5 1.5B | SmolLM2 1.7B | MiniCPM 2B | BitNet b1.58 2B
Memory (Non-emb) | 2GB | 1.4GB | 2.6GB | 3.2GB | 4.8GB | 0.4GB
Latency (CPU; TPOT) | 48ms | 41ms | 65ms | 67ms | 124ms | 29ms
Energy (Estimated) | 0.258J | 0.186J | 0.347J | 0.425J | 0.649J | 0.028J
Training Tokens (Pre-training) | 9T (pruning & distillation) | 2T (distillation) | 18T | 11T | 1.1T | 4T
ARC-Challenge (0-shot; Acc,norm) | 37.80 | 38.40 | 46.67 | 43.52 | 44.80 | 49.91
ARC-Easy (0-shot; Acc,norm) | 63.17 | 63.13 | 76.01 | 62.92 | 72.14 | 74.79
OpenbookQA (0-shot; Acc,norm) | 34.80 | 38.80 | 40.80 | 46.00 | 40.20 | 41.60
BoolQ (0-shot; Acc) | 64.65 | 74.22 | 78.04 | 75.78 | 80.67 | 80.18
HellaSwag (0-shot; Acc,norm) | 60.80 | 57.69 | 68.28 | 71.71 | 70.81 | 68.44
PIQA (0-shot; Acc,norm) | 74.21 | 71.93 | 76.12 | 76.12 | 76.66 | 77.09
WinoGrande (0-shot; Acc) | 59.51 | 58.48 | 62.83 | 68.98 | 61.80 | 71.90
CommonsenseQA (10-shot; Acc) | 58.48 | 42.10 | 76.41 | 63.55 | 71.74 | 71.58
TruthfulQA (10-shot; MC2) | 43.80 | 38.66 | 46.67 | 39.90 | 41.41 | 45.31
TriviaQA (5-shot; EM) | 37.60 | 23.49 | 38.37 | 45.97 | 34.13 | 33.57
MMLU (5-shot; Acc) | 45.58 | 39.91 | 60.25 | 49.24 | 51.82 | 53.17
HumanEval+ (0-shot; Pass@1) | 31.10 | 37.20 | 50.60 | 28.00 | 43.90 | 38.40
GSM8K (4-shot; EM) | 38.21 | 31.16 | 56.79 | 45.11 | 4.40 | 58.38
MATH-500 (0-shot; EM) | 23.00 | 42.00 | 53.00 | 17.60 | 14.80 | 43.40
IFEval (0-shot; Instruct-Strict) | 62.71 | 66.67 | 50.12 | 57.91 | 36.81 | 53.48
MT-bench (0-shot; Average) | 5.43 | 6.40 | 6.12 | 5.50 | 6.57 | 5.85
Average | 44.90 | 43.74 | 55.23 | 48.70 | 42.05 | 54.19
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.678, + 0.825, + 0.72 + ], + "angle": 0, + "content": "Table 1: Comparison of BitNet b1.58 2B4T with leading open-weight full-precision LLMs of similar size (1B-2B parameters) on efficiency metrics and performance across a wide range of benchmarks. All models compared are instruction-tuned versions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.814, + 0.825, + 0.913 + ], + "angle": 0, + "content": "More importantly, this superior memory efficiency does not compromise performance relative to the quantized models. Standard PTQ techniques lead to a noticeable degradation in performance compared to the original full-precision model. In contrast, BitNet b1.58 2B4T maintains stronger overall performance than the INT4 quantized versions of Qwen2.5-1.5B on the evaluated benchmarks. This comparison suggests that BitNet b1.58 2B4T represents a more favorable point on the efficiency-performance curve than applying conventional INT4 PTQ to existing architectures, offering better performance with lower resource usage." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.504, + 0.947 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.205, + 0.089, + 0.791, + 0.284 + ], + "angle": 0, + "content": "
Benchmark (Metric) | Qwen2.5 1.5B-bf16 | Qwen2.5 1.5B-GPTQ-int4 | Qwen2.5 1.5B-AWQ-int4 | BitNet b1.58 2B
Memory (Non-emb) | 2.6GB | 0.7GB | 0.7GB | 0.4GB
Activation | bf16 | bf16 | bf16 | int8
MMLU (5-shot; Acc) | 60.25 | 58.06 | 57.43 | 53.17
GSM8K (4-shot; EM) | 56.79 | 50.57 | 50.64 | 58.38
IFEval (0-shot; Instruct-Strict) | 50.12 | 47.84 | 45.44 | 53.48
Average | 55.72 | 52.15 | 51.17 | 55.01
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.289, + 0.828, + 0.334 + ], + "angle": 0, + "content": "Table 2: Comparison of BitNet b1.58 (2B) against Qwen2.5 1.5B in its original bf16 precision and after INT4 post-training quantization (GPTQ and AWQ). All models shown are based on instruction-tuned checkpoints." + }, + { + "type": "table", + "bbox": [ + 0.194, + 0.355, + 0.803, + 0.713 + ], + "angle": 0, + "content": "
Benchmark (Metric) | Bonsai 0.5B | OLMo-Bitnet 1B | Falcon3-1.58bit 7B | Llama3-8B-1.58 8B | BitNet b1.58 2B
Native 1-bit | Yes | Yes | No | No | Yes
ARC-Challenge (0-shot; Acc,norm) | 33.19 | 26.54 | 37.80 | 43.69 | 49.91
ARC-Easy (0-shot; Acc,norm) | 58.25 | 25.38 | 65.03 | 70.71 | 74.79
OpenbookQA (0-shot; Acc,norm) | 33.60 | 28.20 | 38.20 | 37.20 | 41.60
BoolQ (0-shot; Acc) | 58.44 | 52.48 | 72.14 | 68.38 | 80.18
HellaSwag (0-shot; Acc,norm) | 48.01 | 25.88 | 59.46 | 68.56 | 68.44
PIQA (0-shot; Acc,norm) | 70.02 | 50.49 | 72.36 | 75.30 | 77.09
WinoGrande (0-shot; Acc) | 54.46 | 51.54 | 60.14 | 60.93 | 71.90
CommonsenseQA (10-shot; Acc) | 18.43 | 19.49 | 67.08 | 28.50 | 71.58
TruthfulQA (10-shot; MC2) | 40.65 | 49.05 | 43.29 | 39.13 | 45.31
TriviaQA (5-shot; EM) | 10.84 | 0.00 | 0.00 | 19.82 | 33.57
MMLU (5-shot; Acc) | 25.74 | 25.47 | 42.79 | 35.04 | 53.17
Average | 41.06 | 32.22 | 50.76 | 49.75 | 60.68
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.717, + 0.829, + 0.761 + ], + "angle": 0, + "content": "Table 3: Performance comparison of BitNet b1.58 2B4T against other open-weight 1-bit models. This includes natively trained 1-bit models (Bonsai-0.5B, OLMo-Bitnet-1B) and larger models posttraining quantized to 1.58-bit (Falcon3-1.58bit-7B, Llama3-8B-1.58)." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.795, + 0.522, + 0.811 + ], + "angle": 0, + "content": "4.3 Comparison with Open-weight 1-bit Models" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.821, + 0.825, + 0.879 + ], + "angle": 0, + "content": "Finally, we situate BitNet b1.58 2B4T within the landscape of other models designed for or quantized to near 1-bit precision. We compare it against natively trained 1-bit models of smaller scale and significantly larger models post-training quantized to 1.58-bit precision. The comparative results are presented in Table 3." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.829, + 0.914 + ], + "angle": 0, + "content": "The evaluation clearly positions BitNet b1.58 2B4T as the leading model in this category. It demonstrates significantly stronger overall performance than all other compared 1-bit models, achieving" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.176 + ], + "angle": 0, + "content": "the highest scores on the vast majority of benchmarks. Notably, BitNet b1.58 2B4T substantially outperforms not only the smaller, natively trained 1-bit models but also the much larger models (in terms of parameter count) that were quantized to 1-bit. This highlights the effectiveness of the native training approach employed by BitNet b1.58 2B4T, allowing it to set a new state-of-the-art performance level for models operating at this extreme level of quantization, even surpassing larger models subjected to post-training quantization." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.207, + 0.423, + 0.223 + ], + "angle": 0, + "content": "5 Inference Implementation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.244, + 0.827, + 0.328 + ], + "angle": 0, + "content": "Efficient inference is crucial for deploying Large Language Models, particularly for resource-constrained environments. The unique quantization scheme of BitNet b1.58 2B4T, employing 1.58-bit weights and 8-bit activations (W1.58A8), necessitates specialized implementations, as standard deep learning libraries often lack optimized kernels for such mixed-precision, low-bit formats. To address this, we developed and open-sourced dedicated inference libraries for both GPU and CPU platforms. The code is publicly available at https://aka.ms/bitnet." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.355, + 0.318, + 0.369 + ], + "angle": 0, + "content": "5.1 GPU Inference" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.385, + 0.825, + 0.455 + ], + "angle": 0, + "content": "Current GPU architectures and their associated software libraries (e.g., cuBLAS, PyTorch kernels) are primarily optimized for operations involving standard data types like FP16, BF16, and INT8/INT4. Native, high-performance support for the specific W1.58A8 matrix multiplication required by BitNet b1.58 2B4T is generally unavailable. This limitation can hinder the realization of the theoretical efficiency gains offered by 1-bit models on existing hardware." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.46, + 0.826, + 0.613 + ], + "angle": 0, + "content": "To enable efficient GPU inference, we developed a custom CUDA kernel specifically designed for the W1.58A8 matrix multiplication. Since ternary weights \\(\\{-1,0, + 1\\}\\), representing 1.58 bits) cannot be stored efficiently using standard data types, we pack multiple weight values into a single 8-bit integer ('int8') for storage in High Bandwidth Memory (HBM). Specifically, four ternary values are encoded into one 'int8' value. During computation, the CUDA kernel loads the packed 'int8' weights from HBM into the GPU's faster on-chip Shared Memory (SRAM). It then unpacks these values back into a representation suitable for efficient ternary computation (e.g., reconstructing the -1, 0, +1 values) immediately before performing the matrix multiplication with the 8-bit activations. This 'pack-store-load-unpack-compute' strategy minimizes memory bandwidth usage while leveraging custom compute instructions. Further implementation details and optimization strategies are elaborated in the Ladder framework (Wang et al., 2023b)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.619, + 0.825, + 0.687 + ], + "angle": 0, + "content": "While our custom kernel significantly improves performance compared to naive implementations, we note that current commodity GPU architectures are not optimally designed for the 1-bit models. We believe that future hardware innovations, potentially incorporating dedicated logic for low-bit operations, will be essential to fully unlock the performance and energy efficiency potential of models like BitNet b1.58." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.716, + 0.317, + 0.73 + ], + "angle": 0, + "content": "5.2 CPU Inference" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.746, + 0.825, + 0.789 + ], + "angle": 0, + "content": "To ensure broad accessibility and enable deployment on devices lacking powerful GPUs (e.g., edge devices, laptops, standard servers), we developed bitnet.cpp. This C++ library serves as an official reference implementation for CPU inference of 1-bit LLMs, including BitNet b1.58." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.794, + 0.825, + 0.864 + ], + "angle": 0, + "content": "bitnet.cpp provides optimized kernels tailored for efficient execution on standard CPU architectures. The kernels are designed to operate efficiently with the model's specific quantization scheme, avoiding the overhead of generic quantization libraries or intricate low-level bit manipulation where possible. It processes the weight elements in a manner consistent with the BitNet b1.58 training methodology, ensuring numerical accuracy (lossless inference relative to the training procedure)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "This approach delivers fast and accurate inference of 1.58-bit models directly on CPUs. More technical details and usage instructions can be found in the bitnet.cpp repository and associated technical report (Wang et al., 2025)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.091, + 0.303, + 0.108 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.123, + 0.825, + 0.179 + ], + "angle": 0, + "content": "This technical report introduced BitNet b1.58 2B4T, a significant step towards highly efficient yet capable Large Language Models. As the first open-source, native 1-bit LLM trained at the 2-billion parameter scale on 4 trillion tokens, our work demonstrates the viability of extreme quantization directly within the training process." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.185, + 0.827, + 0.298 + ], + "angle": 0, + "content": "Comprehensive evaluations across benchmarks assessing language understanding, reasoning, mathematics, coding, and dialogue revealed that BitNet b1.58 2B4T achieves performance comparable to state-of-the-art open-weight, full-precision models of similar size. Crucially, this performance parity is achieved with dramatically reduced computational requirements, offering substantial savings in memory footprint, energy consumption, and inference latency. To facilitate practical use and further research, we developed and released optimized inference implementations for both GPU (via custom CUDA kernels) and CPU (via the 'bitnet.cpp' library), alongside the model weights available on Hugging Face." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.302, + 0.828, + 0.359 + ], + "angle": 0, + "content": "BitNet b1.58 2B4T represents a compelling proof-of-concept that challenges the necessity of full-precision weights for achieving high performance in LLMs at scale. It opens avenues for deploying powerful language models in resource-constrained environments where previous models were prohibitive, potentially democratizing access to advanced AI capabilities." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.38, + 0.356, + 0.397 + ], + "angle": 0, + "content": "7 Future Directions" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.414, + 0.827, + 0.43 + ], + "angle": 0, + "content": "While BitNet b1.58 2B4T demonstrates promising results, several exciting research directions remain:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.443, + 0.825, + 0.498 + ], + "angle": 0, + "content": "- Scaling Laws and Larger Models: Investigating the scaling properties of native 1-bit LLMs is crucial. Future work will explore training larger models (e.g., 7B, 13B parameters and beyond) and training on even larger datasets to understand if the performance parity with full-precision models holds." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.506, + 0.825, + 0.577 + ], + "angle": 0, + "content": "- Hardware Co-Design and Optimization: The full potential of 1-bit models is likely hindered by current hardware limitations. Continued development of highly optimized kernels for existing hardware (GPUs, CPUs, NPUs) is needed. Furthermore, co-designing future hardware accelerators specifically optimized for 1-bit computations and data movement could unlock orders-of-magnitude improvements in speed and energy efficiency." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.583, + 0.827, + 0.667 + ], + "angle": 0, + "content": "- Extended Sequence Length: Extending the maximum sequence length of BitNet b1.58 2B4T can process is crucial. 
This enhancement is vital for tasks demanding long-context understanding, such as summarizing lengthy documents or engaging in complex problem-solving, and is particularly critical for improving performance on long chain-of-thought reasoning tasks. Investigating efficient attention mechanisms suitable for low-bit models at longer sequence lengths will be key." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.673, + 0.827, + 0.716 + ], + "angle": 0, + "content": "- Multilingual Capabilities: The current model is primarily trained on English-centric data. Extending the pre-training corpus and potentially adapting the architecture to effectively support multiple languages is a key direction for broader applicability." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.722, + 0.825, + 0.779 + ], + "angle": 0, + "content": "- Multimodal Integration: Exploring the integration of 1-bit principles into multimodal architectures is another promising frontier. Developing efficient ways to process and fuse information from different modalities (e.g., text and images) within a low-bit framework could enable new applications." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.786, + 0.827, + 0.842 + ], + "angle": 0, + "content": "- Theoretical Understanding: Delving deeper into the theoretical underpinnings of why 1-bit training at scale is effective remains an open area. Analyzing the learning dynamics, loss landscapes, and representational properties of these models could yield valuable insights for future development." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.443, + 0.827, + 0.842 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.827, + 0.913 + ], + "angle": 0, + "content": "By pursuing these directions, we aim to further advance the capability and efficiency of 1-bit LLMs, paving the way for more sustainable and accessible artificial intelligence. The open-source release of BitNet b1.58 2B4T and its associated tools provides a foundation for the community to build upon these efforts." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.174, + 0.09, + 0.27, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.115, + 0.829, + 0.185 + ], + "angle": 0, + "content": "Allal, L. B., Lozhkov, A., Bakouch, E., Blázquez, G. M., Penedo, G., Tunstall, L., Marafioti, A., Kydlíček, H., Lajarín, A. P., Srivastav, V., Lochner, J., Fahlgren, C., Nguyen, X.-S., Fourrier, C., Burtenshaw, B., Larcher, H., Zhao, H., Zakka, C., Morlon, M., Raffel, C., von Werra, L., and Wolf, T. (2025). Smollm2: When smol goes big - data-centric training of a small language model. CoRR, abs/2502.02737." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.196, + 0.829, + 0.252 + ], + "angle": 0, + "content": "Bai, S., Chen, K., Liu, X., Wang, J., Ge, W., Song, S., Dang, K., Wang, P., Wang, S., Tang, J., Zhong, H., Zhu, Y., Yang, M.-H., Li, Z., Wan, J., Wang, P., Ding, W., Fu, Z., Xu, Y., Ye, J., Zhang, X., Xie, T., Cheng, Z., Zhang, H., Yang, Z., Xu, H., and Lin, J. (2025). Qwen2.5-vl technical report. CoRR, abs/2502.13923." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.263, + 0.826, + 0.293 + ], + "angle": 0, + "content": "Bisk, Y., Zellers, R., Bras, R. L., Gao, J., and Choi, Y. (2019). PIQA: reasoning about physical commonsense in natural language. CoRR, abs/1911.11641." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.303, + 0.826, + 0.333 + ], + "angle": 0, + "content": "Clark, C., Lee, K., Chang, M.-W., Kwiatkowski, T., Collins, M., and Toutanova, K. (2019). Boolq: Exploring the surprising difficulty of natural yes/no questions. CoRR, abs/1905.10044." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.343, + 0.829, + 0.385 + ], + "angle": 0, + "content": "Cobbe, K., Kosaraju, V., Bavarian, M., Chen, M., Jun, H., Kaiser, L., Plappert, M., Tworek, J., Hilton, J., Nakano, R., Hesse, C., and Schulman, J. (2021). Training verifiers to solve math word problems. CoRR, abs/2110.14168." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.396, + 0.826, + 0.44 + ], + "angle": 0, + "content": "Cui, G., Yuan, L., Ding, N., Yao, G., He, B., Zhu, W., Ni, Y., Xie, G., Xie, R., Lin, Y., Liu, Z., and Sun, M. (2024). ULTRAFEEDBACK: boosting language models with scaled AI feedback. In ICML. OpenReview.net." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.45, + 0.829, + 0.631 + ], + "angle": 0, + "content": "Dubey, A., Jauhri, A., Pandey, A., Kadian, A., Al-Dahle, A., Letman, A., Mathur, A., Schelten, A., Yang, A., Fan, A., Goyal, A., Hartshorn, A., Yang, A., Mitra, A., Sravankumar, A., Korenev, A., Hinsvark, A., Rao, A., Zhang, A., Rodriguez, A., Gregerson, A., Spataru, A., Rozière, B., Biron, B., Tang, B., Chern, B., Caucheteux, C., Nayak, C., Bi, C., Marra, C., McConnell, C., Keller, C., Touret, C., Wu, C., Wong, C., Ferrer, C. C., Nikolaidis, C., Allonsius, D., Song, D., Pintz, D., Livshits, D., Esiobu, D., Choudhary, D., Mahajan, D., Garcia-Olano, D., Perino, D., Hupkes, D., Lakomkin, E., AlBadawy, E., Lobanova, E., Dinan, E., Smith, E. M., Radenovic, F., Zhang, F., Synnaeve, G., Lee, G., Anderson, G. L., Nail, G., Mialon, G., Pang, G., Cucurell, G., Nguyen, H., Korevaar, H., Xu, H., Touvron, H., Zarov, I., Ibarra, I. A., Kloumann, I. M., Misra, I., Evtimov, I., Copet, J., Lee, J., Geffert, J., Vranes, J., Park, J., Mahadeokar, J., Shah, J., van der Linde, J., Billock, J., Hong, J., Lee, J., Fu, J., Chi, J., Huang, J., Liu, J., Wang, J., Yu, J., Bitton, J., Spisak, J., Park, J., Rocca, J., Johnstun, J., Saxe, J., Jia, J., Alwala, K. V., Upasani, K., Plawiak, K., Li, K., Heafield, K., Stone, K., and et al. (2024). The llama 3 herd of models. CoRR, abs/2407.21783." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.641, + 0.829, + 0.684 + ], + "angle": 0, + "content": "Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D., and Steinhardt, J. (2021a). Measuring massive multitask language understanding. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.695, + 0.829, + 0.751 + ], + "angle": 0, + "content": "Hendrycks, D., Burns, C., Kadavath, S., Arora, A., Basart, S., Tang, E., Song, D., and Steinhardt, J. (2021b). Measuring mathematical problem solving with the MATH dataset. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.762, + 0.829, + 0.805 + ], + "angle": 0, + "content": "Horowitz, M. (2014). 1.1 computing's energy problem (and what we can do about it). In 2014 IEEE International Conference on Solid-State Circuits Conference, ISSCC 2014, Digest of Technical Papers, San Francisco, CA, USA, February 9-13, 2014, pages 10-14." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.816, + 0.829, + 0.845 + ], + "angle": 0, + "content": "Hsu, P.-L., Dai, Y., Kothapalli, V., Song, Q., Tang, S., Zhu, S., Shimizu, S., Sahni, S., Ning, H., and Chen, Y. (2024). Liger kernel: Efficient triton kernels for LLM training. CoRR, abs/2410.10989." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.856, + 0.829, + 0.912 + ], + "angle": 0, + "content": "Hu, S., Tu, Y., Han, X., He, C., Cui, G., Long, X., Zheng, Z., Fang, Y., Huang, Y., Zhao, W., Zhang, X., Thai, Z. L., Zhang, K., Wang, C., Yao, Y., Zhao, C., Zhou, J., Cai, J., Zhai, Z., Ding, N., Jia, C., Zeng, G., Li, D., Liu, Z., and Sun, M. (2024). Minicpm: Unveiling the potential of small language models with scalable training strategies. CoRR, abs/2404.06395." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.115, + 0.829, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.828, + 0.15 + ], + "angle": 0, + "content": "Joshi, M., Choi, E., Weld, D. S., and Zettlemoyer, L. (2017). Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics, ACL 2017, Vancouver, Canada, July 30 - August 4, Volume 1: Long Papers, pages 1601-1611." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.156, + 0.827, + 0.213 + ], + "angle": 0, + "content": "Li, H., Dong, Q., Tang, Z., Wang, C., Zhang, X., Huang, H., Huang, S., Huang, X., Huang, Z., Zhang, D., Gu, Y., Cheng, X., Wang, X., Chen, S.-Q., Dong, L., Lu, W., Sui, Z., Wang, B., Lam, W., and Wei, F. (2024a). Synthetic data (almost) from scratch: Generalized instruction tuning for language models. CoRR, abs/2402.13064." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.221, + 0.828, + 0.389 + ], + "angle": 0, + "content": "Li, J., Fang, A., Smyrnis, G., Ivgi, M., Jordan, M., Gadre, S. Y., Bansal, H., Guha, E., Keh, S. S., Arora, K., Garg, S., Xin, R., Muennighoff, N., Heckel, R., Mercat, J., Chen, M. F., Gururangan, S., Wortsman, M., Albalak, A., Bitton, Y., Nezhurina, M., Abbas, A., Hsieh, C.-Y., Ghosh, D., Gardner, J., Kilian, M., Zhang, H., Shao, R., Pratt, S. M., Sanyal, S., Ilharco, G., Daras, G., Marathe, K., Gokaslan, A., Zhang, J., Chandu, K. R., Nguyen, T., Vasiljevic, I., Kakade, S. M., Song, S., Sanghavi, S., Faghri, F., Oh, S., Zettlemoyer, L., Lo, K., El-Nouby, A., Pouransari, H., Toshev, A., Wang, S., Groeneveld, D., Soldaini, L., Koh, P. W., Jitsev, J., Kollar, T., Dimakis, A., Carmon, Y., Dave, A., Schmidt, L., and Shankar, V. (2024b). Datacomp-lm: In search of the next generation of training sets for language models. In Globersons, A., Mackey, L., Belgrave, D., Fan, A., Paquet, U., Tomczak, J. M., and Zhang, C., editors, Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.397, + 0.827, + 0.427 + ], + "angle": 0, + "content": "Lian, W., Wang, G., Goodson, B., Pentland, E., Cook, A., Vong, C., and \"Teknium\" (2023). Slimorca: An open dataset of gpt-4 augmented flan reasoning traces, with verification." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.434, + 0.827, + 0.478 + ], + "angle": 0, + "content": "Lin, S., Hilton, J., and Evans, O. (2022). Truthfulqa: Measuring how models mimic human falsehoods. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2022, Dublin, Ireland, May 22-27, 2022, pages 3214-3252." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.485, + 0.827, + 0.529 + ], + "angle": 0, + "content": "Liu, J., Xia, C. S., Wang, Y., and Zhang, L. (2023). Is your code generated by chatgpt really correct? rigorous evaluation of large language models for code generation. Advances in Neural Information Processing Systems, 36:21558-21572." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.537, + 0.827, + 0.567 + ], + "angle": 0, + "content": "Ma, S., Wang, H., Ma, L., Wang, L., Wang, W., Huang, S., Dong, L., Wang, R., Xue, J., and Wei, F. (2024). The era of 1-bit llms: All large language models are in 1.58 bits. CoRR, abs/2402.17764." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.575, + 0.827, + 0.605 + ], + "angle": 0, + "content": "Mihaylov, T., Clark, P., Khot, T., and Sabharwal, A. (2018). Can a suit of armor conduct electricity? A new dataset for open book question answering. CoRR, abs/1809.02789." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.612, + 0.828, + 0.697 + ], + "angle": 0, + "content": "Penedo, G., Kydlícek, H., Allal, L. B., Lozhkov, A., Mitchell, M., Raffel, C. A., von Werra, L., and Wolf, T. (2024). The fineweb datasets: Decanting the web for the finest text data at scale. In Globersons, A., Mackey, L., Belgrave, D., Fan, A., Paquet, U., Tomczak, J. M., and Zhang, C., editors, Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.705, + 0.828, + 0.763 + ], + "angle": 0, + "content": "Rafailov, R., Sharma, A., Mitchell, E., Manning, C. D., Ermon, S., and Finn, C. (2023). Direct preference optimization: Your language model is secretly a reward model. In Oh, A., Naumann, T., Globerson, A., Saenko, K., Hardt, M., and Levine, S., editors, Advances in Neural Information Processing Systems 36." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.77, + 0.825, + 0.8 + ], + "angle": 0, + "content": "Sakaguchi, K., Bras, R. L., Bhagavatula, C., and Choi, Y. (2020). WinoGrande: an adversarial winograd schema challenge at scale. In AAAI, pages 8732-8740." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.807, + 0.825, + 0.838 + ], + "angle": 0, + "content": "Schulman, J., Wolski, F., Dhariwal, P., Radford, A., and Klimov, O. (2017). Proximal policy optimization algorithms. CoRR, abs/1707.06347." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.845, + 0.827, + 0.888 + ], + "angle": 0, + "content": "Shao, Z., Wang, P., Zhu, Q., Xu, R., Song, J., Zhang, M., Li, Y. K., Wu, Y., and Guo, D. (2024). Deepseekmath: Pushing the limits of mathematical reasoning in open language models. CoRR, abs/2402.03300." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.897, + 0.7, + 0.913 + ], + "angle": 0, + "content": "Shazeer, N. (2020). GLU variants improve transformer. CoRR, abs/2002.05202." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.091, + 0.828, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.121 + ], + "angle": 0, + "content": "Su, J., Ahmed, M. H. M., Lu, Y., Pan, S., Bo, W., and Liu, Y. (2024). Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.127, + 0.826, + 0.199 + ], + "angle": 0, + "content": "Talmor, A., Herzig, J., Lourie, N., and Berant, J. (2019). Commonsenseqa: A question answering challenge targeting commonsense knowledge. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2019, Minneapolis, MN, USA, June 2-7, 2019, Volume 1 (Long and Short Papers), pages 4149-4158." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.205, + 0.826, + 0.248 + ], + "angle": 0, + "content": "Tang, Z., Zhang, X., Wang, B., and Wei, F. (2024). Mathscale: Scaling instruction tuning for mathematical reasoning. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.255, + 0.547, + 0.272 + ], + "angle": 0, + "content": "Team, F.-L. (2024). The falcon 3 family of open models." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.278, + 0.829, + 0.31 + ], + "angle": 0, + "content": "Team, G., Kamath, A., Ferret, J., Pathak, S., Vieillard, N., Merhej, R., Perrin, S., Matejovicova, T., Ram'e, A., Rivi'ere, M., et al. (2025). Gemma 3 technical report. arXiv preprint arXiv:2503.19786." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.315, + 0.826, + 0.386 + ], + "angle": 0, + "content": "Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L., and Polosukhin, I. (2017). Attention is all you need. In Guyon, I., von Luxburg, U., Bengio, S., Wallach, H. M., Fergus, R., Vishwanathan, S. V. N., and Garnett, R., editors, Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 5998-6008." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.393, + 0.827, + 0.424 + ], + "angle": 0, + "content": "Wang, H., Ma, S., Dong, L., Huang, S., Wang, H., Ma, L., Yang, F., Wang, R., Wu, Y., and Wei, F. (2023a). Bitnet: Scaling 1-bit transformers for large language models. CoRR, abs/2310.11453." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.43, + 0.827, + 0.472 + ], + "angle": 0, + "content": "Wang, H., Ma, S., Huang, S., Dong, L., Wang, W., Peng, Z., Wu, Y., Bajaj, P., Singhal, S., Benhaim, A., Patra, B., Liu, Z., Chaudhary, V., Song, X., and Wei, F. (2022). Foundation transformers. CoRR." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.48, + 0.825, + 0.51 + ], + "angle": 0, + "content": "Wang, H., Ma, S., Wang, R., and Wei, F. (2024a). Q-sparse: All large language models can be fully sparsely-activated. CoRR, abs/2407.10969." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.516, + 0.827, + 0.546 + ], + "angle": 0, + "content": "Wang, H., Ma, S., and Wei, F. (2024b). Bitnet a4.8: 4-bit activations for 1-bit llms. CoRR, abs/2411.04965." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.553, + 0.827, + 0.583 + ], + "angle": 0, + "content": "Wang, J., Zhou, H., Song, T., Cao, S., Xia, Y., Cao, T., Wei, J., Ma, S., Wang, H., and Wei, F. (2025). Bitnet.cpp: Efficient edge inference for ternary lms. CoRR, abs/2502.11880." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.59, + 0.827, + 0.62 + ], + "angle": 0, + "content": "Wang, L., Ma, L., Cao, S., Zheng, N., Zhang, Q., Xue, J., Miao, Z., Cao, T., and Yang, Y. (2023b). Ladder: Efficient tensor compilation on customized data format. In OSDI." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.626, + 0.826, + 0.683 + ], + "angle": 0, + "content": "Xu, C., Sun, Q., Zheng, K., Geng, X., Zhao, P., Feng, J., Tao, C., Lin, Q., and Jiang, D. (2024a). Wizardlm: Empowering large pre-trained language models to follow complex instructions. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.69, + 0.826, + 0.761 + ], + "angle": 0, + "content": "Xu, Y., Han, X., Yang, Z., Wang, S., Zhu, Q., Liu, Z., Liu, W., and Che, W. (2024b). Onebit: Towards extremely low-bit large language models. In Globersons, A., Mackey, L., Belgrave, D., Fan, A., Paquet, U., Tomczak, J. M., and Zhang, C., editors, Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.768, + 0.827, + 0.799 + ], + "angle": 0, + "content": "Xu, Z., Jiang, F., Niu, L., Deng, Y., Poovendran, R., Choi, Y., and Lin, B. Y. (2024c). Magpie: Alignment data synthesis from scratch by prompting aligned lms with nothing. CoRR, abs/2406.08464." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.805, + 0.825, + 0.835 + ], + "angle": 0, + "content": "Yadav, V., Bethard, S., and Surdeanu, M. (2019). Quick and (not so) dirty: Unsupervised selection of justification sentences for multi-hop question answering. In EMNLP-IJCNLP." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.842, + 0.826, + 0.912 + ], + "angle": 0, + "content": "Yang, A., Yang, B., Zhang, B., Hui, B., Zheng, B., Yu, B., Li, C., Liu, D., Huang, F., Wei, H., Lin, H., Yang, J., Tu, J., Zhang, J., Yang, J., Yang, J., Zhou, J., Lin, J., Dang, K., Lu, K., Bao, K., Yang, K., Yu, L., Li, M., Xue, M., Zhang, P., Zhu, Q., Men, R., Lin, R., Li, T., Xia, T., Ren, X., Ren, X., Fan, Y., Su, Y., Zhang, Y., Wan, Y., Liu, Y., Cui, Z., Zhang, Z., and Qiu, Z. (2024). Qwen2.5 technical report. CoRR, abs/2412.15115." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.829, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "Zellers, R., Holtzman, A., Bisk, Y., Farhadi, A., and Choi, Y. (2019). HellaSwag: can a machine really finish your sentence? In Proceedings of the 57th Conference of the Association for Computational Linguistics, pages 4791-4800." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.144, + 0.826, + 0.174 + ], + "angle": 0, + "content": "Zhang, Y., Zhang, Z., and Lew, L. (2022). PokeBNN: A binary pursuit of lightweight accuracy. 
In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12465-12475. IEEE." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.182, + 0.827, + 0.226 + ], + "angle": 0, + "content": "Zhao, W., Ren, X., Hessel, J., Cardie, C., Choi, Y., and Deng, Y. (2024). Wildchat: 1m chatgpt interaction logs in the wild. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.233, + 0.827, + 0.291 + ], + "angle": 0, + "content": "Zheng, L., Chiang, W.-L., Sheng, Y., Li, T., Zhuang, S., Wu, Z., Zhuang, Y., Li, Z., Lin, Z., Xing, E. P., Gonzalez, J. E., Stoica, I., and Zhang, H. (2024). Lmsys-chat-1m: A large-scale real-world LLM conversation dataset. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.299, + 0.827, + 0.343 + ], + "angle": 0, + "content": "Zheng, L., Chiang, W.-L., Sheng, Y., Zhuang, S., Wu, Z., Zhuang, Y., Lin, Z., Li, Z., Li, D., Xing, E. P., Zhang, H., Gonzalez, J. E., and Stoica, I. (2023). Judging lvm-as-a-judge with mt-bench and chatbot arena. In Advances in Neural Information Processing Systems 36." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.351, + 0.827, + 0.381 + ], + "angle": 0, + "content": "Zhou, J., Lu, T., Mishra, S., Brahma, S., Basu, S., Luan, Y., Zhou, D., and Hou, L. (2023). Instruction-following evaluation for large language models. CoRR, abs/2311.07911." + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.091, + 0.827, + 0.381 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.406, + 0.402, + 0.424 + ], + "angle": 0, + "content": "A Open-weight Baselines" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.437, + 0.719, + 0.453 + ], + "angle": 0, + "content": "We summarize the links to the open-weight LLMs evaluated in this work as below:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.465, + 0.622, + 0.479 + ], + "angle": 0, + "content": "- LLaMA 3.2 1B: meta-llama/Llama-3.2-1B-Instruct" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.484, + 0.508, + 0.499 + ], + "angle": 0, + "content": "- Gemma-3 1B: google/gemma-3-1b-it" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.503, + 0.563, + 0.518 + ], + "angle": 0, + "content": "Qwen2.5 0.5B: Qwen/Qwen2.5-0.5B-Instruct" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.522, + 0.563, + 0.537 + ], + "angle": 0, + "content": "- Qwen2.5 1.5B: Qwen/Qwen2.5-1.5B-Instruct" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.541, + 0.534, + 0.556 + ], + "angle": 0, + "content": "- Qwen2.5 3B: Qwen/Qwen2.5-3B-Instruct" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.56, + 0.647, + 0.575 + ], + "angle": 0, + "content": "- SmolLM2 1.7B: HuggingFaceTB/SmolLM2-1.7B-Instruct" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.579, + 0.57, + 0.594 + ], + "angle": 0, + "content": "- MiniCPM 2B: openbmb/MiniCPM-2B-dpo-bf16" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.598, + 0.731, + 0.613 + ], + "angle": 0, + "content": "- Qwen2.5 1.5B-GPTQ-int4: Qwen/Qwen2.5-1.5B-Instruct-GPTQ-Int4" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.618, + 0.673, + 0.633 + ], + "angle": 0, + "content": "Qwen2.5 1.5B-AWQ-int4: Qwen/Qwen2.5-1.5B-Instruct-AWQ" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.637, + 0.464, + 0.652 + ], + "angle": 0, + "content": "- Bonsai 0.5B: deepgrove/Bonsai" + 
}, + { + "type": "text", + "bbox": [ + 0.217, + 0.656, + 0.593, + 0.67 + ], + "angle": 0, + "content": "- OLMo-Bitnet 1B: NousResearch/OLMo-Bitnet-1B" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.675, + 0.664, + 0.689 + ], + "angle": 0, + "content": "- Falcon3-1.58bit 7B: tiiuae/Falcon3-7B-Instruct-1.58bit" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.694, + 0.686, + 0.708 + ], + "angle": 0, + "content": "- Llama3-8B-1.58 8B: HF1BitLLM/Llama3-8B-1.58-100B-tokens" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.465, + 0.731, + 0.708 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.728, + 0.436, + 0.746 + ], + "angle": 0, + "content": "B Evaluation Pipeline Details" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.759, + 0.828, + 0.789 + ], + "angle": 0, + "content": "To ensure standardized evaluation, we employed established toolkits for different benchmark categories. Specifically:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.799, + 0.724, + 0.814 + ], + "angle": 0, + "content": "- For the HumanEval+ coding benchmark, we utilized the evalplus toolkit." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.818, + 0.826, + 0.845 + ], + "angle": 0, + "content": "- For the MATH-500 mathematical reasoning benchmark, we used a customized version of the math-evaluation-harness toolkit." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.851, + 0.825, + 0.879 + ], + "angle": 0, + "content": "- For the MT-Bench conversational benchmark, evaluation was performed using the official LLM Judge open-source codebase." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.884, + 0.825, + 0.912 + ], + "angle": 0, + "content": "- For all other benchmarks assessing language understanding, reasoning, knowledge, and comprehension, we used the standard lm-evaluation-harness framework." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.799, + 0.826, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.364, + 0.089, + 0.634, + 0.148 + ], + "angle": 0, + "content": "
BitsADD EnergyMUL Energy
FP160.160.34
INT80.0070.07
" + }, + { + "type": "table_caption", + "bbox": [ + 0.182, + 0.153, + 0.816, + 0.169 + ], + "angle": 0, + "content": "Table 4: ADD and MUL energy consumption (in pJ) of different precision at \\(7\\mathrm{nm}\\) process nodes." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.194, + 0.825, + 0.222 + ], + "angle": 0, + "content": "Models were prompted using a chat format for generative tasks (e.g., GSM8K, IFEval, and MT-Bench), while default settings from the respective toolkits were used for other tasks." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.228, + 0.825, + 0.284 + ], + "angle": 0, + "content": "For energy consumption, we utilize the energy model in (Horowitz, 2014; Zhang et al., 2022) to estimate the arithmetic operations energy (AOE) of matrix multiplication. The sequence length is set as 512 tokens. We present the energy consumption for ADD and MUL operation at \\(7\\mathrm{nm}\\) process nodes in Table 4." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.29, + 0.825, + 0.36 + ], + "angle": 0, + "content": "To assess CPU decoding performance, latency measurements were conducted on a Surface Laptop Studio 2 system powered by a 13th Gen Intel Core i7-13800H processor. The benchmarking process utilized 8 CPU threads. Specifically, the BitNet b1.58 2B4T model was tested using its bitnet.cpp implementation, whereas other models were evaluated using the llama.cpp framework. For each model, we generated 128 tokens and report the average latency per token for this task." + }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "14" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12285/2c3f7ef8-ab61-4b87-a7bf-c49da203744d_origin.pdf b/data/2025/2504_12xxx/2504.12285/2c3f7ef8-ab61-4b87-a7bf-c49da203744d_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e7ac9dd5c4cda95b8a2c1271f5ba3c9ff426bc76 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12285/2c3f7ef8-ab61-4b87-a7bf-c49da203744d_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d926b0f3a796c4f78416808392482b06374c328a9d086452c09bfa09c74cfb85 +size 308962 diff --git a/data/2025/2504_12xxx/2504.12285/full.md b/data/2025/2504_12xxx/2504.12285/full.md new file mode 100644 index 0000000000000000000000000000000000000000..09567bd0415a3f969515fe02c730c7a6ac00c9b1 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12285/full.md @@ -0,0 +1,298 @@ +# Abstract + +We introduce BitNet b1.58 2B4T, the first open-source, native 1-bit Large Language Model (LLM) at the 2-billion parameter scale. Trained on a corpus of 4 trillion tokens, the model has been rigorously evaluated across benchmarks covering language understanding, mathematical reasoning, coding proficiency, and conversational ability. Our results demonstrate that BitNet b1.58 2B4T achieves performance on par with leading open-weight, full-precision LLMs of similar size, while offering significant advantages in computational efficiency, including substantially reduced memory footprint, energy consumption, and decoding latency. To facilitate further research and adoption, the model weights are released via Hugging Face along with open-source inference implementations for both GPU and CPU architectures. 
+ +BitNet b1.58 2B4T (1.58-bit): bitnet-b1.58-2B-4T + +The packed weight of BitNet b1.58 2B4T, used for inference only + +BitNet b1.58 2B4T (bf16): bitnet-b1.58-2B-4T-bf16 + +The master weight of BitNet b1.58 2B4T, used for training only + +BitNet b1.58 2B4T (gguf): bitnet-b1.58-2B-4T-gguf + +The GGUF format of BitNet b1.58 2B4T, used for bitnet.cpp + +BitNet b1.58 2B4T Code: bitnet.cpp Demo: aka.ms/bitnet-demo + +![](images/e9b0504f3305e06d140af96f6c0e0d1ce952c56b2f03e24d6adcb32b50b7eb16.jpg) +Figure 1: BitNet b1.58 2B4T advances the Pareto frontier defined by leading open-weight LLMs under 3B parameters in terms of performance versus memory, demonstrating superior efficiency. + +# 1 Introduction + +Open-source large language models (LLMs) have become pivotal in democratizing access to advanced AI capabilities, fostering innovation, and enabling research across diverse fields such as natural language processing, code generation, and vision computing (Dubey et al., 2024; Yang et al., 2024; Bai et al., 2025). Their public availability allows for widespread experimentation and adaptation. However, a significant barrier hinders their broader adoption: the substantial computational resources required for deployment and inference. State-of-the-art open LLMs typically require large memory footprints, consume considerable energy, and exhibit notable inference latency, rendering them impractical for many edge devices, resource-constrained environments, and real-time applications. + +1-bit LLMs, representing an extreme yet promising form of model quantization where weights and potentially activations are constrained to binary $\{-1, +1\}$ or ternary $\{-1, 0, +1\}$ , offer a compelling solution to the efficiency challenges. By drastically reducing the memory required to store weights and enabling highly efficient bitwise computations, they have the potential to significantly lower deployment costs, reduce energy consumption, and accelerate inference speeds. While prior work has explored 1-bit models, existing open efforts often fall into two categories: 1) post-training quantization (PTQ) methods applied to pre-trained full-precision models, which can lead to significant performance degradation (Xu et al., 2024b; Team, 2024), or 2) native 1-bit models (trained from scratch with 1-bit weights) that have been developed at relatively smaller scales (e.g., OLMo-Bitnet-1B²]) and may not yet match the capabilities of larger, full-precision counterparts. This performance gap has limited the practical impact of 1-bit LLMs thus far. + +To bridge this gap between efficiency and performance, we introduce BitNet b1.58 2B4T, the first open-source, native 1-bit LLM trained at scale. This model, comprising 2 billion parameters, was trained from scratch on a substantial dataset of 4 trillion tokens, leveraging architectural and training innovations specific to the 1-bit paradigm. The core contribution of this work is to demonstrate that a native 1-bit LLM, when trained effectively at scale, can achieve performance comparable to leading open-weight, full-precision models of similar size across a wide range of tasks. + +This technical report details the development and evaluation of BitNet b1.58 2B4T. We describe the architecture and training methodology, and then present comprehensive evaluation results on standard benchmarks assessing language understanding, mathematical reasoning, coding proficiency, and multi-turn conversational abilities. 
Our findings confirm its strong performance relative to established full-precision baselines, coupled with significant advantages in efficiency. Finally, we announce the public release of the BitNet b1.58 2B4T model weights via Hugging Face and provide open-source inference code optimized for both GPU and CPU execution, aiming to facilitate further research and the practical deployment of highly efficient LLMs.

# 2 Architecture

The architecture of BitNet b1.58 2B4T is derived from the standard Transformer model (Vaswani et al., 2017), incorporating significant modifications based on the BitNet framework (Wang et al., 2023a; Ma et al., 2024). The model is trained entirely from scratch.

The core architectural innovation lies in replacing the standard full-precision linear layers (torch.nn.Linear) with custom BitLinear layers. This constitutes the foundation of the BitNet approach. Within these BitLinear layers:

- **Weight Quantization:** Model weights are quantized to 1.58 bits during the forward pass. This is achieved using an absolute mean (absmean) quantization scheme, which maps weights to ternary values $\{-1, 0, +1\}$. This drastically reduces the model size and enables efficient mathematical operations.
- **Activation Quantization:** Activations flowing through the linear projection are quantized to 8-bit integers. This employs an absolute maximum (absmax) quantization strategy, applied per token.
- **Normalization:** We incorporate subln normalization (Wang et al., 2022) to further enhance training stability, which can be particularly beneficial in quantized training regimes.

Beyond the BitLinear layers, several established LLM techniques are integrated to enhance performance and stability:

- **Activation Function (FFN):** Within the feed-forward network (FFN) sub-layers, instead of the commonly used SwiGLU activation (Shazeer, 2020), BitNet b1.58 2B4T employs squared ReLU ($\mathrm{ReLU}^2$). This choice is motivated by its potential to improve model sparsity and computational characteristics within the 1-bit context (Wang et al., 2024a,b).
- **Positional Embeddings:** Rotary Position Embeddings (RoPE) (Su et al., 2024) are used to inject positional information, a standard practice in modern high-performance LLMs.
- **Bias Removal:** Consistent with architectures like LLaMA, all bias terms are removed from the linear layers and normalization layers throughout the network, reducing the parameter count and potentially simplifying quantization.

For tokenization, we adopt the tokenizer developed for LLaMA 3 (Dubey et al., 2024). This tokenizer implements a byte-level Byte-Pair Encoding (BPE) scheme with a vocabulary size of 128,256 tokens. This choice ensures robust handling of diverse text and code, and its widespread adoption facilitates straightforward integration with existing open-source tooling and ecosystems.

# 3 Training

The training process for BitNet b1.58 2B4T involved three distinct phases: large-scale pre-training followed by supervised fine-tuning (SFT) and direct preference optimization (DPO). While advanced techniques like Proximal Policy Optimization (PPO) or Group Relative Policy Optimization (GRPO) can further enhance capabilities such as mathematics and chain-of-thought reasoning (Schulman et al., 2017; Shao et al., 2024), the current version of BitNet b1.58 2B4T relies solely on pre-training, SFT, and DPO. The exploration of reinforcement learning methods remains a direction for future work.
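To make the BitLinear layer concrete before turning to the individual training phases, the sketch below illustrates how the absmean weight quantization and per-token absmax activation quantization described in Section 2 enter the forward pass during training. This is an illustrative PyTorch-style sketch, not the released training code: the helper names are hypothetical, and the straight-through estimator over full-precision master weights is an assumption based on standard quantization-aware-training practice and the BitNet papers (Wang et al., 2023a; Ma et al., 2024).

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def absmean_weight_quant(w: torch.Tensor):
    # absmean scheme: scale by the mean absolute value, then round
    # each weight to the nearest ternary value in {-1, 0, +1}.
    scale = w.abs().mean().clamp(min=1e-5)
    w_q = (w / scale).round().clamp(-1, 1)
    return w_q, scale

def absmax_activation_quant(x: torch.Tensor):
    # per-token absmax scheme: map each token's activations onto the
    # 8-bit integer range [-128, 127].
    scale = x.abs().max(dim=-1, keepdim=True).values.clamp(min=1e-5) / 127.0
    x_q = (x / scale).round().clamp(-128, 127)
    return x_q, scale

class BitLinear(nn.Linear):
    """Sketch of a drop-in replacement for torch.nn.Linear (no bias)."""

    def __init__(self, in_features: int, out_features: int):
        super().__init__(in_features, out_features, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        w_q, w_scale = absmean_weight_quant(self.weight)
        x_q, x_scale = absmax_activation_quant(x)
        # Straight-through estimator: quantized (then dequantized)
        # values in the forward pass; gradients flow to the
        # full-precision master weights.
        w_ste = self.weight + (w_q * w_scale - self.weight).detach()
        x_ste = x + (x_q * x_scale - x).detach()
        return F.linear(x_ste, w_ste)
```

At inference time the ternary weights and their scales are precomputed and packed rather than recomputed per forward pass; Section 5 describes the dedicated kernels.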
# 3.1 Pre-training

The pre-training phase aimed to imbue the model with broad world knowledge and foundational language capabilities. We adapted general training strategies from established LLM practices (Dubey et al., 2024), with specific adjustments tailored for the 1-bit architecture.

# 3.1.1 Learning Rate Schedule

A two-stage learning rate schedule was employed.

1. **Stage 1 (High Learning Rate):** The initial phase utilized a standard cosine decay schedule but commenced with a relatively high peak learning rate. This decision was informed by the observation that 1-bit models often exhibit greater training stability compared to their full-precision counterparts, allowing for more aggressive initial learning steps.
2. **Stage 2 (Cooldown):** Approximately midway through the planned training token count, the learning rate was abruptly decayed and subsequently maintained via a cosine schedule with a significantly lower peak value. This "cooldown" phase allows the model to refine its representations on higher-quality data (see Section 3.1.3).

# 3.1.2 Weight Decay Schedule

Complementing the learning rate adjustments, a two-stage weight decay strategy was implemented.

1. **Stage 1:** During the first training stage, weight decay followed a cosine schedule, reaching a peak value of 0.1. This regularization helps prevent overfitting during the initial high learning-rate phase.
2. **Stage 2:** In the second stage, weight decay was effectively disabled (set to zero). This allows the model parameters to settle into finer-grained optima, guided by the lower learning rate and curated data.

# 3.1.3 Pre-training Data

The pre-training corpus comprised a mixture of publicly available text and code datasets, including large web crawls like DCLM (Li et al., 2024b) and educational web pages like FineWeb-EDU (Penedo et al., 2024). To enhance mathematical reasoning abilities, we also incorporated synthetically generated mathematical data. The data presentation strategy aligned with the two-stage training: the bulk of general web data was processed during Stage 1, while higher-quality curated datasets were emphasized during the Stage 2 cooldown phase, coinciding with the reduced learning rate.

# 3.2 Supervised Fine-tuning (SFT)

Following pre-training, the model underwent supervised fine-tuning (SFT) to enhance its instruction-following capabilities and improve its performance in conversational interaction formats.

# 3.2.1 SFT Data

The SFT phase utilized a diverse collection of publicly available instruction-following and conversational datasets. These included, but were not limited to, WildChat (Zhao et al., 2024), LMSYS-Chat-1M (Zheng et al., 2024), WizardLM Evol-Instruct (Xu et al., 2024a), and SlimOrca (Lian et al., 2023). To further bolster specific capabilities, particularly in reasoning and complex instruction adherence, we supplemented these with synthetic datasets generated using methodologies like GLAN (Li et al., 2024a) and MathScale (Tang et al., 2024).

# 3.2.2 Chat Template

For conversational tasks during SFT and inference, the following chat template structure was employed:

```txt
<|begin_of_text|>System: {system_message}<|eot_id|>
User: {user_message_1}<|eot_id|>
Assistant: {assistant_message_1}<|eot_id|>
User: {user_message_2}<|eot_id|>
Assistant: {assistant_message_2}<|eot_id|>...
```

# 3.2.3 Optimization Details

Several optimization choices were key during SFT:

- **Loss Aggregation:** Instead of averaging the cross-entropy loss across tokens within a batch (mean reduction), we employed summation. Empirically, we observed that summing the losses led to improved convergence and better final performance for this model.
- **Hyperparameter Tuning:** Careful tuning of the learning rate and the number of training epochs was performed. Consistent with our pre-training findings, the 1-bit model benefited from a relatively larger learning rate during SFT compared to typical full-precision model fine-tuning. Furthermore, achieving optimal convergence required extending the fine-tuning duration over a larger number of epochs than full-precision models of similar size.

# 3.3 Direct Preference Optimization (DPO)

To further align the model's behavior with human preferences regarding helpfulness and safety, we applied Direct Preference Optimization (DPO) (Rafailov et al., 2023) following the SFT phase. DPO offers an efficient alternative to traditional RLHF by directly optimizing the language model using preference data, thereby circumventing the need to train a separate reward model. This DPO stage served to refine the model's conversational prowess and overall alignment with desired interaction patterns in practical use cases.

# 3.3.1 Training Data

The preference dataset used for DPO training was constructed from a combination of publicly available resources recognized for capturing diverse human judgments on model outputs. Specifically, we utilized UltraFeedback (Cui et al., 2024) and MagPie (Xu et al., 2024c). The aggregation of these datasets provided a robust and multifaceted preference signal, guiding the model towards generating responses more aligned with human expectations.

# 3.3.2 Training Details

The DPO training phase was conducted for 2 epochs. We employed a learning rate of $2 \times 10^{-7}$ and set the DPO beta parameter, which controls the divergence from the reference policy, to 0.1. To enhance training efficiency during this phase, we integrated optimized kernels from the Liger Kernel library (Hsu et al., 2024). Qualitatively, our observations indicate that the DPO process effectively steered the model towards preferred response styles without inducing significant degradation in the core capabilities established during pre-training and SFT.

# 4 Evaluation

We measure performance on a wide variety of benchmarks, classified as follows:

- Language understanding and reasoning: ARC-Easy (Yadav et al., 2019), ARC-Challenge (Yadav et al., 2019), HellaSwag (Zellers et al., 2019), WinoGrande (Sakaguchi et al., 2020), PIQA (Bisk et al., 2019), OpenbookQA (Mihaylov et al., 2018), and CommonsenseQA (Talmor et al., 2019)
- World knowledge: TruthfulQA (Lin et al., 2022) and MMLU (Hendrycks et al., 2021a)
- Reading comprehension: TriviaQA (Joshi et al., 2017) and BoolQ (Clark et al., 2019)
- Math and code: GSM8K (Cobbe et al., 2021), MATH-500 (Hendrycks et al., 2021b) and HumanEval+ (Liu et al., 2023)
- Instruction following and conversation: IFEval (Zhou et al., 2023) and MT-bench (Zheng et al., 2023)

We compare BitNet b1.58 2B4T with leading open-weight full-precision LLMs of similar size, including LLaMA 3.2 1B (Dubey et al., 2024), Gemma-3 1B (Team et al., 2025), Qwen2.5 1.5B (Yang et al., 2024), SmolLM2 1.7B (Allal et al., 2025) and MiniCPM 2B (Hu et al., 2024). All models are instruction-tuned versions.
We re-run all benchmarks with a public evaluation pipeline for a fair comparison. More evaluation details are available in the appendix. The main results are presented in Table 1.

# 4.1 Main Results

As shown in Table 1, BitNet b1.58 2B4T demonstrates remarkable resource efficiency. Its non-embedding memory footprint and estimated energy consumption (Horowitz, 2014; Zhang et al., 2022) during decoding are substantially lower compared to all the full-precision models evaluated, highlighting a significant advantage in operational cost and deployability on resource-constrained devices.

In terms of task performance, BitNet b1.58 2B4T proves highly competitive. It achieves the best results among the compared models on several benchmarks spanning reasoning, knowledge, and math capabilities. On other benchmarks, its performance is closely comparable to the top-performing full-precision models. While some full-precision models show slight advantages on specific tasks or the overall average, BitNet b1.58 2B4T delivers strong performance across the board. The results indicate that BitNet b1.58 2B4T achieves capabilities nearly on par with leading models in its size class while offering dramatically improved efficiency.

# 4.2 Comparison with Post-training Quantized Models

We further investigate the efficiency-performance trade-off by comparing BitNet b1.58 2B4T against post-training quantized (PTQ) versions of a leading competitor, Qwen2.5 1.5B, using standard INT4 methods (GPTQ and AWQ). The results are summarized in Table 2.

While INT4 quantization successfully reduces the memory footprint of the full-precision model, BitNet b1.58 2B4T achieves an even lower memory requirement due to its native 1-bit architecture.
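As a quick plausibility check on the memory column of Table 1, ternary weights admit four values per byte when stored with 2-bit codes. The numbers below are illustrative only: the non-embedding parameter count is our assumption (roughly 1.7B, inferred from the reported footprint), not a figure stated in this report.

```python
# Back-of-the-envelope packed-weight storage estimate (illustrative).
non_embedding_params = 1.7e9     # assumption, not an official count
packed_bytes_per_weight = 2 / 8  # four 2-bit ternary codes per byte

packed_gb = non_embedding_params * packed_bytes_per_weight / 1024**3
bf16_gb = non_embedding_params * 2 / 1024**3

print(f"packed ternary: ~{packed_gb:.2f} GB")  # ~0.4 GB, matching Table 1
print(f"bf16 weights:   ~{bf16_gb:.2f} GB")    # ~3.2 GB for comparison
```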
| Benchmark (Metric) | LLaMA 3.2 1B | Gemma-3 1B | Qwen2.5 1.5B | SmolLM2 1.7B | MiniCPM 2B | BitNet b1.58 2B |
| --- | --- | --- | --- | --- | --- | --- |
| Memory (Non-emb) | 2GB | 1.4GB | 2.6GB | 3.2GB | 4.8GB | 0.4GB |
| Latency (CPU; TPOT) | 48ms | 41ms | 65ms | 67ms | 124ms | 29ms |
| Energy (Estimated) | 0.258J | 0.186J | 0.347J | 0.425J | 0.649J | 0.028J |
| Training Tokens (Pre-training) | 9T (pruning & distillation) | 2T (distillation) | 18T | 11T | 1.1T | 4T |
| ARC-Challenge (0-shot; Acc,norm) | 37.80 | 38.40 | 46.67 | 43.52 | 44.80 | 49.91 |
| ARC-Easy (0-shot; Acc,norm) | 63.17 | 63.13 | 76.01 | 62.92 | 72.14 | 74.79 |
| OpenbookQA (0-shot; Acc,norm) | 34.80 | 38.80 | 40.80 | 46.00 | 40.20 | 41.60 |
| BoolQ (0-shot; Acc) | 64.65 | 74.22 | 78.04 | 75.78 | 80.67 | 80.18 |
| HellaSwag (0-shot; Acc,norm) | 60.80 | 57.69 | 68.28 | 71.71 | 70.81 | 68.44 |
| PIQA (0-shot; Acc,norm) | 74.21 | 71.93 | 76.12 | 76.12 | 76.66 | 77.09 |
| WinoGrande (0-shot; Acc) | 59.51 | 58.48 | 62.83 | 68.98 | 61.80 | 71.90 |
| CommonsenseQA (10-shot; Acc) | 58.48 | 42.10 | 76.41 | 63.55 | 71.74 | 71.58 |
| TruthfulQA (10-shot; MC2) | 43.80 | 38.66 | 46.67 | 39.90 | 41.41 | 45.31 |
| TriviaQA (5-shot; EM) | 37.60 | 23.49 | 38.37 | 45.97 | 34.13 | 33.57 |
| MMLU (5-shot; Acc) | 45.58 | 39.91 | 60.25 | 49.24 | 51.82 | 53.17 |
| HumanEval+ (0-shot; Pass@1) | 31.10 | 37.20 | 50.60 | 28.00 | 43.90 | 38.40 |
| GSM8K (4-shot; EM) | 38.21 | 31.16 | 56.79 | 45.11 | 4.40 | 58.38 |
| MATH-500 (0-shot; EM) | 23.00 | 42.00 | 53.00 | 17.60 | 14.80 | 43.40 |
| IFEval (0-shot; Instruct-Strict) | 62.71 | 66.67 | 50.12 | 57.91 | 36.81 | 53.48 |
| MT-bench (0-shot; Average) | 5.43 | 6.40 | 6.12 | 5.50 | 6.57 | 5.85 |
| Average | 44.90 | 43.74 | 55.23 | 48.70 | 42.05 | 54.19 |
+ +Table 1: Comparison of BitNet b1.58 2B4T with leading open-weight full-precision LLMs of similar size (1B-2B parameters) on efficiency metrics and performance across a wide range of benchmarks. All models compared are instruction-tuned versions. + +More importantly, this superior memory efficiency does not compromise performance relative to the quantized models. Standard PTQ techniques lead to a noticeable degradation in performance compared to the original full-precision model. In contrast, BitNet b1.58 2B4T maintains stronger overall performance than the INT4 quantized versions of Qwen2.5-1.5B on the evaluated benchmarks. This comparison suggests that BitNet b1.58 2B4T represents a more favorable point on the efficiency-performance curve than applying conventional INT4 PTQ to existing architectures, offering better performance with lower resource usage. + +
| Benchmark (Metric) | Qwen2.5 1.5B-bf16 | Qwen2.5 1.5B-GPTQ-int4 | Qwen2.5 1.5B-AWQ-int4 | BitNet b1.58 2B |
| --- | --- | --- | --- | --- |
| Memory (Non-emb) | 2.6GB | 0.7GB | 0.7GB | 0.4GB |
| Activation | bf16 | bf16 | bf16 | int8 |
| MMLU (5-shot; Acc) | 60.25 | 58.06 | 57.43 | 53.17 |
| GSM8K (4-shot; EM) | 56.79 | 50.57 | 50.64 | 58.38 |
| IFEval (0-shot; Instruct-Strict) | 50.12 | 47.84 | 45.44 | 53.48 |
| Average | 55.72 | 52.15 | 51.17 | 55.01 |
+ +Table 2: Comparison of BitNet b1.58 (2B) against Qwen2.5 1.5B in its original bf16 precision and after INT4 post-training quantization (GPTQ and AWQ). All models shown are based on instruction-tuned checkpoints. + +
| Benchmark (Metric) | Bonsai 0.5B | OLMo-Bitnet 1B | Falcon3-1.58bit 7B | Llama3-8B-1.58 8B | BitNet b1.58 2B |
| --- | --- | --- | --- | --- | --- |
| Native 1-bit | ✓ | ✓ | ✗ | ✗ | ✓ |
| ARC-Challenge (0-shot; Acc,norm) | 33.19 | 26.54 | 37.80 | 43.69 | 49.91 |
| ARC-Easy (0-shot; Acc,norm) | 58.25 | 25.38 | 65.03 | 70.71 | 74.79 |
| OpenbookQA (0-shot; Acc,norm) | 33.60 | 28.20 | 38.20 | 37.20 | 41.60 |
| BoolQ (0-shot; Acc) | 58.44 | 52.48 | 72.14 | 68.38 | 80.18 |
| HellaSwag (0-shot; Acc,norm) | 48.01 | 25.88 | 59.46 | 68.56 | 68.44 |
| PIQA (0-shot; Acc,norm) | 70.02 | 50.49 | 72.36 | 75.30 | 77.09 |
| WinoGrande (0-shot; Acc) | 54.46 | 51.54 | 60.14 | 60.93 | 71.90 |
| CommonsenseQA (10-shot; Acc) | 18.43 | 19.49 | 67.08 | 28.50 | 71.58 |
| TruthfulQA (10-shot; MC2) | 40.65 | 49.05 | 43.29 | 39.13 | 45.31 |
| TriviaQA (5-shot; EM) | 10.84 | 0.00 | 0.00 | 19.82 | 33.57 |
| MMLU (5-shot; Acc) | 25.74 | 25.47 | 42.79 | 35.04 | 53.17 |
| Average | 41.06 | 32.22 | 50.76 | 49.75 | 60.68 |
Table 3: Performance comparison of BitNet b1.58 2B4T against other open-weight 1-bit models. This includes natively trained 1-bit models (Bonsai-0.5B, OLMo-Bitnet-1B) and larger models post-training quantized to 1.58-bit (Falcon3-1.58bit-7B, Llama3-8B-1.58).

# 4.3 Comparison with Open-weight 1-bit Models

Finally, we situate BitNet b1.58 2B4T within the landscape of other models designed for or quantized to near 1-bit precision. We compare it against natively trained 1-bit models of smaller scale and significantly larger models post-training quantized to 1.58-bit precision. The comparative results are presented in Table 3.

The evaluation clearly positions BitNet b1.58 2B4T as the leading model in this category. It demonstrates significantly stronger overall performance than all other compared 1-bit models, achieving the highest scores on the vast majority of benchmarks. Notably, BitNet b1.58 2B4T substantially outperforms not only the smaller, natively trained 1-bit models but also the much larger models (in terms of parameter count) that were quantized to 1-bit. This highlights the effectiveness of the native training approach employed by BitNet b1.58 2B4T, allowing it to set a new state-of-the-art performance level for models operating at this extreme level of quantization, even surpassing larger models subjected to post-training quantization.

# 5 Inference Implementation

Efficient inference is crucial for deploying Large Language Models, particularly for resource-constrained environments. The unique quantization scheme of BitNet b1.58 2B4T, employing 1.58-bit weights and 8-bit activations (W1.58A8), necessitates specialized implementations, as standard deep learning libraries often lack optimized kernels for such mixed-precision, low-bit formats. To address this, we developed and open-sourced dedicated inference libraries for both GPU and CPU platforms. The code is publicly available at https://aka.ms/bitnet.

# 5.1 GPU Inference

Current GPU architectures and their associated software libraries (e.g., cuBLAS, PyTorch kernels) are primarily optimized for operations involving standard data types like FP16, BF16, and INT8/INT4. Native, high-performance support for the specific W1.58A8 matrix multiplication required by BitNet b1.58 2B4T is generally unavailable. This limitation can hinder the realization of the theoretical efficiency gains offered by 1-bit models on existing hardware.

To enable efficient GPU inference, we developed a custom CUDA kernel specifically designed for the W1.58A8 matrix multiplication. Since ternary weights ($\{-1, 0, +1\}$, representing 1.58 bits) cannot be stored efficiently using standard data types, we pack multiple weight values into a single 8-bit integer ('int8') for storage in High Bandwidth Memory (HBM). Specifically, four ternary values are encoded into one 'int8' value. During computation, the CUDA kernel loads the packed 'int8' weights from HBM into the GPU's faster on-chip Shared Memory (SRAM). It then unpacks these values back into a representation suitable for efficient ternary computation (e.g., reconstructing the -1, 0, +1 values) immediately before performing the matrix multiplication with the 8-bit activations. This 'pack-store-load-unpack-compute' strategy minimizes memory bandwidth usage while leveraging custom compute instructions. Further implementation details and optimization strategies are elaborated in the Ladder framework (Wang et al., 2023b).
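To make the packing scheme concrete, the sketch below shows one way to encode four ternary values into a single 8-bit integer and recover them, written in PyTorch for readability rather than CUDA. The specific 2-bit code assignment ({-1, 0, +1} mapped to {0, 1, 2}) and the bit layout are illustrative assumptions; the actual kernel's bit arrangement may differ.

```python
import torch

def pack_ternary(w: torch.Tensor) -> torch.Tensor:
    # w holds ternary values in {-1, 0, +1}. Shift them to unsigned
    # 2-bit codes {0, 1, 2}, then pack four codes into each uint8.
    codes = (w + 1).to(torch.uint8).reshape(-1, 4)
    return (codes[:, 0]
            | (codes[:, 1] << 2)
            | (codes[:, 2] << 4)
            | (codes[:, 3] << 6))

def unpack_ternary(packed: torch.Tensor) -> torch.Tensor:
    # Inverse of pack_ternary: extract the four 2-bit codes from each
    # byte and shift them back to {-1, 0, +1}.
    codes = torch.stack([(packed >> s) & 0x3 for s in (0, 2, 4, 6)], dim=1)
    return (codes.to(torch.int8) - 1).reshape(-1)

w = torch.tensor([-1, 0, 1, 1, 0, 0, -1, 1])
packed = pack_ternary(w)  # 2 bytes in place of 8 weight values
assert torch.equal(unpack_ternary(packed), w.to(torch.int8))
```

In the kernel described above, this unpacking happens after the packed weights reach shared memory, immediately before the multiplication with the 8-bit activations.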
+ +While our custom kernel significantly improves performance compared to naive implementations, we note that current commodity GPU architectures are not optimally designed for the 1-bit models. We believe that future hardware innovations, potentially incorporating dedicated logic for low-bit operations, will be essential to fully unlock the performance and energy efficiency potential of models like BitNet b1.58. + +# 5.2 CPU Inference + +To ensure broad accessibility and enable deployment on devices lacking powerful GPUs (e.g., edge devices, laptops, standard servers), we developed bitnet.cpp. This C++ library serves as an official reference implementation for CPU inference of 1-bit LLMs, including BitNet b1.58. + +bitnet.cpp provides optimized kernels tailored for efficient execution on standard CPU architectures. The kernels are designed to operate efficiently with the model's specific quantization scheme, avoiding the overhead of generic quantization libraries or intricate low-level bit manipulation where possible. It processes the weight elements in a manner consistent with the BitNet b1.58 training methodology, ensuring numerical accuracy (lossless inference relative to the training procedure). + +This approach delivers fast and accurate inference of 1.58-bit models directly on CPUs. More technical details and usage instructions can be found in the bitnet.cpp repository and associated technical report (Wang et al., 2025). + +# 6 Conclusion + +This technical report introduced BitNet b1.58 2B4T, a significant step towards highly efficient yet capable Large Language Models. As the first open-source, native 1-bit LLM trained at the 2-billion parameter scale on 4 trillion tokens, our work demonstrates the viability of extreme quantization directly within the training process. + +Comprehensive evaluations across benchmarks assessing language understanding, reasoning, mathematics, coding, and dialogue revealed that BitNet b1.58 2B4T achieves performance comparable to state-of-the-art open-weight, full-precision models of similar size. Crucially, this performance parity is achieved with dramatically reduced computational requirements, offering substantial savings in memory footprint, energy consumption, and inference latency. To facilitate practical use and further research, we developed and released optimized inference implementations for both GPU (via custom CUDA kernels) and CPU (via the 'bitnet.cpp' library), alongside the model weights available on Hugging Face. + +BitNet b1.58 2B4T represents a compelling proof-of-concept that challenges the necessity of full-precision weights for achieving high performance in LLMs at scale. It opens avenues for deploying powerful language models in resource-constrained environments where previous models were prohibitive, potentially democratizing access to advanced AI capabilities. + +# 7 Future Directions + +While BitNet b1.58 2B4T demonstrates promising results, several exciting research directions remain: + +- Scaling Laws and Larger Models: Investigating the scaling properties of native 1-bit LLMs is crucial. Future work will explore training larger models (e.g., 7B, 13B parameters and beyond) and training on even larger datasets to understand if the performance parity with full-precision models holds. +- Hardware Co-Design and Optimization: The full potential of 1-bit models is likely hindered by current hardware limitations. Continued development of highly optimized kernels for existing hardware (GPUs, CPUs, NPUs) is needed. 
Furthermore, co-designing future hardware accelerators specifically optimized for 1-bit computations and data movement could unlock orders-of-magnitude improvements in speed and energy efficiency. +- Extended Sequence Length: Extending the maximum sequence length of BitNet b1.58 2B4T can process is crucial. This enhancement is vital for tasks demanding long-context understanding, such as summarizing lengthy documents or engaging in complex problem-solving, and is particularly critical for improving performance on long chain-of-thought reasoning tasks. Investigating efficient attention mechanisms suitable for low-bit models at longer sequence lengths will be key. +- Multilingual Capabilities: The current model is primarily trained on English-centric data. Extending the pre-training corpus and potentially adapting the architecture to effectively support multiple languages is a key direction for broader applicability. +- Multimodal Integration: Exploring the integration of 1-bit principles into multimodal architectures is another promising frontier. Developing efficient ways to process and fuse information from different modalities (e.g., text and images) within a low-bit framework could enable new applications. +- Theoretical Understanding: Delving deeper into the theoretical underpinnings of why 1-bit training at scale is effective remains an open area. Analyzing the learning dynamics, loss landscapes, and representational properties of these models could yield valuable insights for future development. + +By pursuing these directions, we aim to further advance the capability and efficiency of 1-bit LLMs, paving the way for more sustainable and accessible artificial intelligence. The open-source release of BitNet b1.58 2B4T and its associated tools provides a foundation for the community to build upon these efforts. + +# References + +Allal, L. B., Lozhkov, A., Bakouch, E., Blázquez, G. M., Penedo, G., Tunstall, L., Marafioti, A., Kydlíček, H., Lajarín, A. P., Srivastav, V., Lochner, J., Fahlgren, C., Nguyen, X.-S., Fourrier, C., Burtenshaw, B., Larcher, H., Zhao, H., Zakka, C., Morlon, M., Raffel, C., von Werra, L., and Wolf, T. (2025). Smollm2: When smol goes big - data-centric training of a small language model. CoRR, abs/2502.02737. +Bai, S., Chen, K., Liu, X., Wang, J., Ge, W., Song, S., Dang, K., Wang, P., Wang, S., Tang, J., Zhong, H., Zhu, Y., Yang, M.-H., Li, Z., Wan, J., Wang, P., Ding, W., Fu, Z., Xu, Y., Ye, J., Zhang, X., Xie, T., Cheng, Z., Zhang, H., Yang, Z., Xu, H., and Lin, J. (2025). Qwen2.5-vl technical report. CoRR, abs/2502.13923. +Bisk, Y., Zellers, R., Bras, R. L., Gao, J., and Choi, Y. (2019). PIQA: reasoning about physical commonsense in natural language. CoRR, abs/1911.11641. +Clark, C., Lee, K., Chang, M.-W., Kwiatkowski, T., Collins, M., and Toutanova, K. (2019). Boolq: Exploring the surprising difficulty of natural yes/no questions. CoRR, abs/1905.10044. +Cobbe, K., Kosaraju, V., Bavarian, M., Chen, M., Jun, H., Kaiser, L., Plappert, M., Tworek, J., Hilton, J., Nakano, R., Hesse, C., and Schulman, J. (2021). Training verifiers to solve math word problems. CoRR, abs/2110.14168. +Cui, G., Yuan, L., Ding, N., Yao, G., He, B., Zhu, W., Ni, Y., Xie, G., Xie, R., Lin, Y., Liu, Z., and Sun, M. (2024). ULTRAFEEDBACK: boosting language models with scaled AI feedback. In ICML. OpenReview.net. 
+Dubey, A., Jauhri, A., Pandey, A., Kadian, A., Al-Dahle, A., Letman, A., Mathur, A., Schelten, A., Yang, A., Fan, A., Goyal, A., Hartshorn, A., Yang, A., Mitra, A., Sravankumar, A., Korenev, A., Hinsvark, A., Rao, A., Zhang, A., Rodriguez, A., Gregerson, A., Spataru, A., Rozière, B., Biron, B., Tang, B., Chern, B., Caucheteux, C., Nayak, C., Bi, C., Marra, C., McConnell, C., Keller, C., Touret, C., Wu, C., Wong, C., Ferrer, C. C., Nikolaidis, C., Allonsius, D., Song, D., Pintz, D., Livshits, D., Esiobu, D., Choudhary, D., Mahajan, D., Garcia-Olano, D., Perino, D., Hupkes, D., Lakomkin, E., AlBadawy, E., Lobanova, E., Dinan, E., Smith, E. M., Radenovic, F., Zhang, F., Synnaeve, G., Lee, G., Anderson, G. L., Nail, G., Mialon, G., Pang, G., Cucurell, G., Nguyen, H., Korevaar, H., Xu, H., Touvron, H., Zarov, I., Ibarra, I. A., Kloumann, I. M., Misra, I., Evtimov, I., Copet, J., Lee, J., Geffert, J., Vranes, J., Park, J., Mahadeokar, J., Shah, J., van der Linde, J., Billock, J., Hong, J., Lee, J., Fu, J., Chi, J., Huang, J., Liu, J., Wang, J., Yu, J., Bitton, J., Spisak, J., Park, J., Rocca, J., Johnstun, J., Saxe, J., Jia, J., Alwala, K. V., Upasani, K., Plawiak, K., Li, K., Heafield, K., Stone, K., and et al. (2024). The llama 3 herd of models. CoRR, abs/2407.21783. +Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D., and Steinhardt, J. (2021a). Measuring massive multitask language understanding. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021. +Hendrycks, D., Burns, C., Kadavath, S., Arora, A., Basart, S., Tang, E., Song, D., and Steinhardt, J. (2021b). Measuring mathematical problem solving with the MATH dataset. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual. +Horowitz, M. (2014). 1.1 computing's energy problem (and what we can do about it). In 2014 IEEE International Conference on Solid-State Circuits Conference, ISSCC 2014, Digest of Technical Papers, San Francisco, CA, USA, February 9-13, 2014, pages 10-14. +Hsu, P.-L., Dai, Y., Kothapalli, V., Song, Q., Tang, S., Zhu, S., Shimizu, S., Sahni, S., Ning, H., and Chen, Y. (2024). Liger kernel: Efficient triton kernels for LLM training. CoRR, abs/2410.10989. +Hu, S., Tu, Y., Han, X., He, C., Cui, G., Long, X., Zheng, Z., Fang, Y., Huang, Y., Zhao, W., Zhang, X., Thai, Z. L., Zhang, K., Wang, C., Yao, Y., Zhao, C., Zhou, J., Cai, J., Zhai, Z., Ding, N., Jia, C., Zeng, G., Li, D., Liu, Z., and Sun, M. (2024). Minicpm: Unveiling the potential of small language models with scalable training strategies. CoRR, abs/2404.06395. + +Joshi, M., Choi, E., Weld, D. S., and Zettlemoyer, L. (2017). Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics, ACL 2017, Vancouver, Canada, July 30 - August 4, Volume 1: Long Papers, pages 1601-1611. +Li, H., Dong, Q., Tang, Z., Wang, C., Zhang, X., Huang, H., Huang, S., Huang, X., Huang, Z., Zhang, D., Gu, Y., Cheng, X., Wang, X., Chen, S.-Q., Dong, L., Lu, W., Sui, Z., Wang, B., Lam, W., and Wei, F. (2024a). Synthetic data (almost) from scratch: Generalized instruction tuning for language models. CoRR, abs/2402.13064. +Li, J., Fang, A., Smyrnis, G., Ivgi, M., Jordan, M., Gadre, S. Y., Bansal, H., Guha, E., Keh, S. S., Arora, K., Garg, S., Xin, R., Muennighoff, N., Heckel, R., Mercat, J., Chen, M. 
F., Gururangan, S., Wortsman, M., Albalak, A., Bitton, Y., Nezhurina, M., Abbas, A., Hsieh, C.-Y., Ghosh, D., Gardner, J., Kilian, M., Zhang, H., Shao, R., Pratt, S. M., Sanyal, S., Ilharco, G., Daras, G., Marathe, K., Gokaslan, A., Zhang, J., Chandu, K. R., Nguyen, T., Vasiljevic, I., Kakade, S. M., Song, S., Sanghavi, S., Faghri, F., Oh, S., Zettlemoyer, L., Lo, K., El-Nouby, A., Pouransari, H., Toshev, A., Wang, S., Groeneveld, D., Soldaini, L., Koh, P. W., Jitsev, J., Kollar, T., Dimakis, A., Carmon, Y., Dave, A., Schmidt, L., and Shankar, V. (2024b). Datacomp-lm: In search of the next generation of training sets for language models. In Globersons, A., Mackey, L., Belgrave, D., Fan, A., Paquet, U., Tomczak, J. M., and Zhang, C., editors, Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024. +Lian, W., Wang, G., Goodson, B., Pentland, E., Cook, A., Vong, C., and "Teknium" (2023). Slimorca: An open dataset of gpt-4 augmented flan reasoning traces, with verification. +Lin, S., Hilton, J., and Evans, O. (2022). Truthfulqa: Measuring how models mimic human falsehoods. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2022, Dublin, Ireland, May 22-27, 2022, pages 3214-3252. +Liu, J., Xia, C. S., Wang, Y., and Zhang, L. (2023). Is your code generated by chatgpt really correct? rigorous evaluation of large language models for code generation. Advances in Neural Information Processing Systems, 36:21558-21572. +Ma, S., Wang, H., Ma, L., Wang, L., Wang, W., Huang, S., Dong, L., Wang, R., Xue, J., and Wei, F. (2024). The era of 1-bit llms: All large language models are in 1.58 bits. CoRR, abs/2402.17764. +Mihaylov, T., Clark, P., Khot, T., and Sabharwal, A. (2018). Can a suit of armor conduct electricity? A new dataset for open book question answering. CoRR, abs/1809.02789. +Penedo, G., Kydlícek, H., Allal, L. B., Lozhkov, A., Mitchell, M., Raffel, C. A., von Werra, L., and Wolf, T. (2024). The fineweb datasets: Decanting the web for the finest text data at scale. In Globersons, A., Mackey, L., Belgrave, D., Fan, A., Paquet, U., Tomczak, J. M., and Zhang, C., editors, Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024. +Rafailov, R., Sharma, A., Mitchell, E., Manning, C. D., Ermon, S., and Finn, C. (2023). Direct preference optimization: Your language model is secretly a reward model. In Oh, A., Naumann, T., Globerson, A., Saenko, K., Hardt, M., and Levine, S., editors, Advances in Neural Information Processing Systems 36. +Sakaguchi, K., Bras, R. L., Bhagavatula, C., and Choi, Y. (2020). WinoGrande: an adversarial winograd schema challenge at scale. In AAAI, pages 8732-8740. +Schulman, J., Wolski, F., Dhariwal, P., Radford, A., and Klimov, O. (2017). Proximal policy optimization algorithms. CoRR, abs/1707.06347. +Shao, Z., Wang, P., Zhu, Q., Xu, R., Song, J., Zhang, M., Li, Y. K., Wu, Y., and Guo, D. (2024). Deepseekmath: Pushing the limits of mathematical reasoning in open language models. CoRR, abs/2402.03300. +Shazeer, N. (2020). GLU variants improve transformer. CoRR, abs/2002.05202. + +Su, J., Ahmed, M. H. M., Lu, Y., Pan, S., Bo, W., and Liu, Y. (2024). Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063. 
+Talmor, A., Herzig, J., Lourie, N., and Berant, J. (2019). Commonsenseqa: A question answering challenge targeting commonsense knowledge. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2019, Minneapolis, MN, USA, June 2-7, 2019, Volume 1 (Long and Short Papers), pages 4149-4158. +Tang, Z., Zhang, X., Wang, B., and Wei, F. (2024). Mathscale: Scaling instruction tuning for mathematical reasoning. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net. +Team, F.-L. (2024). The falcon 3 family of open models. +Team, G., Kamath, A., Ferret, J., Pathak, S., Vieillard, N., Merhej, R., Perrin, S., Matejovicova, T., Ram'e, A., Rivi'ere, M., et al. (2025). Gemma 3 technical report. arXiv preprint arXiv:2503.19786. +Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L., and Polosukhin, I. (2017). Attention is all you need. In Guyon, I., von Luxburg, U., Bengio, S., Wallach, H. M., Fergus, R., Vishwanathan, S. V. N., and Garnett, R., editors, Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 5998-6008. +Wang, H., Ma, S., Dong, L., Huang, S., Wang, H., Ma, L., Yang, F., Wang, R., Wu, Y., and Wei, F. (2023a). Bitnet: Scaling 1-bit transformers for large language models. CoRR, abs/2310.11453. +Wang, H., Ma, S., Huang, S., Dong, L., Wang, W., Peng, Z., Wu, Y., Bajaj, P., Singhal, S., Benhaim, A., Patra, B., Liu, Z., Chaudhary, V., Song, X., and Wei, F. (2022). Foundation transformers. CoRR. +Wang, H., Ma, S., Wang, R., and Wei, F. (2024a). Q-sparse: All large language models can be fully sparsely-activated. CoRR, abs/2407.10969. +Wang, H., Ma, S., and Wei, F. (2024b). Bitnet a4.8: 4-bit activations for 1-bit llms. CoRR, abs/2411.04965. +Wang, J., Zhou, H., Song, T., Cao, S., Xia, Y., Cao, T., Wei, J., Ma, S., Wang, H., and Wei, F. (2025). Bitnet.cpp: Efficient edge inference for ternary lms. CoRR, abs/2502.11880. +Wang, L., Ma, L., Cao, S., Zheng, N., Zhang, Q., Xue, J., Miao, Z., Cao, T., and Yang, Y. (2023b). Ladder: Efficient tensor compilation on customized data format. In OSDI. +Xu, C., Sun, Q., Zheng, K., Geng, X., Zhao, P., Feng, J., Tao, C., Lin, Q., and Jiang, D. (2024a). Wizardlm: Empowering large pre-trained language models to follow complex instructions. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net. +Xu, Y., Han, X., Yang, Z., Wang, S., Zhu, Q., Liu, Z., Liu, W., and Che, W. (2024b). Onebit: Towards extremely low-bit large language models. In Globersons, A., Mackey, L., Belgrave, D., Fan, A., Paquet, U., Tomczak, J. M., and Zhang, C., editors, Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024. +Xu, Z., Jiang, F., Niu, L., Deng, Y., Poovendran, R., Choi, Y., and Lin, B. Y. (2024c). Magpie: Alignment data synthesis from scratch by prompting aligned lms with nothing. CoRR, abs/2406.08464. +Yadav, V., Bethard, S., and Surdeanu, M. (2019). Quick and (not so) dirty: Unsupervised selection of justification sentences for multi-hop question answering. In EMNLP-IJCNLP. 
Yang, A., Yang, B., Zhang, B., Hui, B., Zheng, B., Yu, B., Li, C., Liu, D., Huang, F., Wei, H., Lin, H., Yang, J., Tu, J., Zhang, J., Yang, J., Yang, J., Zhou, J., Lin, J., Dang, K., Lu, K., Bao, K., Yang, K., Yu, L., Li, M., Xue, M., Zhang, P., Zhu, Q., Men, R., Lin, R., Li, T., Xia, T., Ren, X., Ren, X., Fan, Y., Su, Y., Zhang, Y., Wan, Y., Liu, Y., Cui, Z., Zhang, Z., and Qiu, Z. (2024). Qwen2.5 technical report. CoRR, abs/2412.15115.

Zellers, R., Holtzman, A., Bisk, Y., Farhadi, A., and Choi, Y. (2019). HellaSwag: can a machine really finish your sentence? In Proceedings of the 57th Conference of the Association for Computational Linguistics, pages 4791-4800.
Zhang, Y., Zhang, Z., and Lew, L. (2022). PokeBNN: A binary pursuit of lightweight accuracy. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12465-12475. IEEE.
Zhao, W., Ren, X., Hessel, J., Cardie, C., Choi, Y., and Deng, Y. (2024). Wildchat: 1m chatgpt interaction logs in the wild. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net.
Zheng, L., Chiang, W.-L., Sheng, Y., Li, T., Zhuang, S., Wu, Z., Zhuang, Y., Li, Z., Lin, Z., Xing, E. P., Gonzalez, J. E., Stoica, I., and Zhang, H. (2024). Lmsys-chat-1m: A large-scale real-world LLM conversation dataset. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net.
Zheng, L., Chiang, W.-L., Sheng, Y., Zhuang, S., Wu, Z., Zhuang, Y., Lin, Z., Li, Z., Li, D., Xing, E. P., Zhang, H., Gonzalez, J. E., and Stoica, I. (2023). Judging llm-as-a-judge with mt-bench and chatbot arena. In Advances in Neural Information Processing Systems 36.
Zhou, J., Lu, T., Mishra, S., Brahma, S., Basu, S., Luan, Y., Zhou, D., and Hou, L. (2023). Instruction-following evaluation for large language models. CoRR, abs/2311.07911.

# A Open-weight Baselines

We summarize the links to the open-weight LLMs evaluated in this work below:

- LLaMA 3.2 1B: meta-llama/Llama-3.2-1B-Instruct
- Gemma-3 1B: google/gemma-3-1b-it
- Qwen2.5 0.5B: Qwen/Qwen2.5-0.5B-Instruct
- Qwen2.5 1.5B: Qwen/Qwen2.5-1.5B-Instruct
- Qwen2.5 3B: Qwen/Qwen2.5-3B-Instruct
- SmolLM2 1.7B: HuggingFaceTB/SmolLM2-1.7B-Instruct
- MiniCPM 2B: openbmb/MiniCPM-2B-dpo-bf16
- Qwen2.5 1.5B-GPTQ-int4: Qwen/Qwen2.5-1.5B-Instruct-GPTQ-Int4
- Qwen2.5 1.5B-AWQ-int4: Qwen/Qwen2.5-1.5B-Instruct-AWQ
- Bonsai 0.5B: deepgrove/Bonsai
- OLMo-Bitnet 1B: NousResearch/OLMo-Bitnet-1B
- Falcon3-1.58bit 7B: tiiuae/Falcon3-7B-Instruct-1.58bit
- Llama3-8B-1.58 8B: HF1BitLLM/Llama3-8B-1.58-100B-tokens

# B Evaluation Pipeline Details

To ensure standardized evaluation, we employed established toolkits for different benchmark categories. Specifically:

- For the HumanEval+ coding benchmark, we utilized the evalplus toolkit.
- For the MATH-500 mathematical reasoning benchmark, we used a customized version of the math-evaluation-harness toolkit.
- For the MT-Bench conversational benchmark, evaluation was performed using the official LLM Judge open-source codebase.
- For all other benchmarks assessing language understanding, reasoning, knowledge, and comprehension, we used the standard lm-evaluation-harness framework (see the example invocation after this list).
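For reference, a typical programmatic invocation of the lm-evaluation-harness (v0.4+) for some of the benchmarks above might look as follows. This is an illustrative sketch, not the exact configuration behind the reported numbers; in particular, the Hugging Face model id, task selection, and batch size are assumptions.

```python
# Illustrative lm-evaluation-harness usage (assumed model id and settings).
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=microsoft/bitnet-b1.58-2B-4T",
    tasks=["arc_challenge", "arc_easy", "hellaswag", "winogrande"],
    num_fewshot=0,
    batch_size=8,
)
print(results["results"])
```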
+<table><tr><td>Bits</td><td>ADD Energy</td><td>MUL Energy</td></tr>
+<tr><td>FP16</td><td>0.16</td><td>0.34</td></tr>
+<tr><td>INT8</td><td>0.007</td><td>0.07</td></tr></table>
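Together with the caption and discussion that follow, the per-operation costs above can be turned into a rough arithmetic-operations energy (AOE) estimate. The sketch below is a minimal illustration under our own assumptions (counting m·n·k multiply-accumulates per matmul, treating the ternary-weight case as INT8 additions only, and picking an arbitrary 2560-wide projection); it is not the report's exact accounting:

```python
# Rough, illustrative energy estimate for one matrix multiply, using the
# per-operation costs from Table 4 (pJ at 7 nm). The MAC-counting convention
# and the add-only treatment of ternary weights are assumptions of this
# sketch, not the report's exact methodology.
ENERGY_PJ = {  # dtype -> (ADD, MUL) energy per operation, in picojoules
    "fp16": (0.16, 0.34),
    "int8": (0.007, 0.07),
}

def matmul_energy_joules(m: int, k: int, n: int, dtype: str, mul_free: bool = False) -> float:
    """Energy of an (m x k) @ (k x n) matmul: m*n*k MACs, one ADD + one MUL each."""
    add_e, mul_e = ENERGY_PJ[dtype]
    macs = m * k * n
    return macs * (add_e + (0.0 if mul_free else mul_e)) * 1e-12  # pJ -> J

# Example: a 512-token sequence (as in the setup described below) through a
# hypothetical 2560 x 2560 projection.
fp16_j = matmul_energy_joules(512, 2560, 2560, "fp16")
w158a8_j = matmul_energy_joules(512, 2560, 2560, "int8", mul_free=True)
print(f"FP16: {fp16_j:.5f} J  W1.58A8 (assumed add-only INT8): {w158a8_j:.7f} J")
```

Under these assumptions, the ternary-weight path pays only for INT8 additions (0.007 pJ each, versus 0.16 + 0.34 pJ per FP16 multiply-accumulate), which is where the bulk of the estimated savings comes from.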
+ +Table 4: ADD and MUL energy consumption (in pJ) of different precision at $7\mathrm{nm}$ process nodes. + +Models were prompted using a chat format for generative tasks (e.g., GSM8K, IFEval, and MT-Bench), while default settings from the respective toolkits were used for other tasks. + +For energy consumption, we utilize the energy model in (Horowitz, 2014; Zhang et al., 2022) to estimate the arithmetic operations energy (AOE) of matrix multiplication. The sequence length is set as 512 tokens. We present the energy consumption for ADD and MUL operation at $7\mathrm{nm}$ process nodes in Table 4. + +To assess CPU decoding performance, latency measurements were conducted on a Surface Laptop Studio 2 system powered by a 13th Gen Intel Core i7-13800H processor. The benchmarking process utilized 8 CPU threads. Specifically, the BitNet b1.58 2B4T model was tested using its bitnet.cpp implementation, whereas other models were evaluated using the llama.cpp framework. For each model, we generated 128 tokens and report the average latency per token for this task. \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12285/images/571146886c535edf30d81d1772d84f416f8ac854969e5314285b8b400728c4d3.jpg b/data/2025/2504_12xxx/2504.12285/images/571146886c535edf30d81d1772d84f416f8ac854969e5314285b8b400728c4d3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f570e99d705539431be5aae05df7a5c5824e812d --- /dev/null +++ b/data/2025/2504_12xxx/2504.12285/images/571146886c535edf30d81d1772d84f416f8ac854969e5314285b8b400728c4d3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:604d68ef59293e9bd79df3be45e945046ad6aae7555f6bbf19d5eb9edf9d48e7 +size 173863 diff --git a/data/2025/2504_12xxx/2504.12285/images/7fdcbcc3b50ac408ac7c07af7c01e1a337e1a44a092a38fa5c43f53314bca52d.jpg b/data/2025/2504_12xxx/2504.12285/images/7fdcbcc3b50ac408ac7c07af7c01e1a337e1a44a092a38fa5c43f53314bca52d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..986d9784e4775e9f29c3e18872a6a48c8bf9c00d --- /dev/null +++ b/data/2025/2504_12xxx/2504.12285/images/7fdcbcc3b50ac408ac7c07af7c01e1a337e1a44a092a38fa5c43f53314bca52d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0152a4ae7d760410e36011b3b10910ae5bd65eac122341c9fa7ff0268a5c0567 +size 101905 diff --git a/data/2025/2504_12xxx/2504.12285/images/c2ea347c586a5437a02e09c5396b1bc21f19fa3a3f5ae4fc75ee151f66b801d8.jpg b/data/2025/2504_12xxx/2504.12285/images/c2ea347c586a5437a02e09c5396b1bc21f19fa3a3f5ae4fc75ee151f66b801d8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3a41d25d8d12a8b48f0c0ce98f864caa88af766c --- /dev/null +++ b/data/2025/2504_12xxx/2504.12285/images/c2ea347c586a5437a02e09c5396b1bc21f19fa3a3f5ae4fc75ee151f66b801d8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0411361340a9328567c2d9d4a0f10fe6274d9aa2313b61e717366e0e95571266 +size 12949 diff --git a/data/2025/2504_12xxx/2504.12285/images/e9b0504f3305e06d140af96f6c0e0d1ce952c56b2f03e24d6adcb32b50b7eb16.jpg b/data/2025/2504_12xxx/2504.12285/images/e9b0504f3305e06d140af96f6c0e0d1ce952c56b2f03e24d6adcb32b50b7eb16.jpg new file mode 100644 index 0000000000000000000000000000000000000000..57c5f7228fe85d27700e8cea32530fc4217fe23d --- /dev/null +++ b/data/2025/2504_12xxx/2504.12285/images/e9b0504f3305e06d140af96f6c0e0d1ce952c56b2f03e24d6adcb32b50b7eb16.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:be1bf80aa43564ddac84a7386e32e63688e7871fc0a0dd47c8fbbc39de35aa3e +size 50419 diff --git a/data/2025/2504_12xxx/2504.12285/images/ef5eaeee5358d095388e3666899372dcd05f08f5db7a5f88b8a1fcf76af24244.jpg b/data/2025/2504_12xxx/2504.12285/images/ef5eaeee5358d095388e3666899372dcd05f08f5db7a5f88b8a1fcf76af24244.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7a0355c1931e04d88f87fef9b5579ece0a20a336 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12285/images/ef5eaeee5358d095388e3666899372dcd05f08f5db7a5f88b8a1fcf76af24244.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6496f9cca7afd4690e82a22b63cc6e392b6e1dafcb37cefeac08f091616afaa7 +size 56369 diff --git a/data/2025/2504_12xxx/2504.12285/layout.json b/data/2025/2504_12xxx/2504.12285/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..a41e910857a80fa30ab8a8980723c63537f6751b --- /dev/null +++ b/data/2025/2504_12xxx/2504.12285/layout.json @@ -0,0 +1,7213 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 281, + 198, + 329, + 210 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 198, + 329, + 210 + ], + "spans": [ + { + "bbox": [ + 281, + 198, + 329, + 210 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 140, + 221, + 471, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 221, + 471, + 344 + ], + "spans": [ + { + "bbox": [ + 140, + 221, + 471, + 344 + ], + "type": "text", + "content": "We introduce BitNet b1.58 2B4T, the first open-source, native 1-bit Large Language Model (LLM) at the 2-billion parameter scale. Trained on a corpus of 4 trillion tokens, the model has been rigorously evaluated across benchmarks covering language understanding, mathematical reasoning, coding proficiency, and conversational ability. Our results demonstrate that BitNet b1.58 2B4T achieves performance on par with leading open-weight, full-precision LLMs of similar size, while offering significant advantages in computational efficiency, including substantially reduced memory footprint, energy consumption, and decoding latency. To facilitate further research and adoption, the model weights are released via Hugging Face along with open-source inference implementations for both GPU and CPU architectures." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 143, + 350, + 355, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 350, + 355, + 360 + ], + "spans": [ + { + "bbox": [ + 143, + 350, + 355, + 360 + ], + "type": "text", + "content": "BitNet b1.58 2B4T (1.58-bit): bitnet-b1.58-2B-4T" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 156, + 361, + 391, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 361, + 391, + 371 + ], + "spans": [ + { + "bbox": [ + 156, + 361, + 391, + 371 + ], + "type": "text", + "content": "The packed weight of BitNet b1.58 2B4T, used for inference only" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 143, + 375, + 367, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 375, + 367, + 386 + ], + "spans": [ + { + "bbox": [ + 143, + 375, + 367, + 386 + ], + "type": "text", + "content": "BitNet b1.58 2B4T (bf16): bitnet-b1.58-2B-4T-bf16" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 156, + 386, + 386, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 386, + 386, + 397 + ], + "spans": [ + { + "bbox": [ + 156, + 386, + 386, + 397 + ], + "type": "text", + "content": "The master weight of BitNet b1.58 2B4T, used for training only" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 143, + 400, + 366, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 400, + 366, + 411 + ], + "spans": [ + { + "bbox": [ + 143, + 400, + 366, + 411 + ], + "type": "text", + "content": "BitNet b1.58 2B4T (gguf): bitnet-b1.58-2B-4T-gguf" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 156, + 411, + 376, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 411, + 376, + 423 + ], + "spans": [ + { + "bbox": [ + 156, + 411, + 376, + 423 + ], + "type": "text", + "content": "The GGUF format of BitNet b1.58 2B4T, used for bitnet.cpp" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 143, + 427, + 435, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 427, + 435, + 439 + ], + "spans": [ + { + "bbox": [ + 143, + 427, + 435, + 439 + ], + "type": "text", + "content": "BitNet b1.58 2B4T Code: bitnet.cpp Demo: aka.ms/bitnet-demo" + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 136, + 445, + 471, + 653 + ], + "blocks": [ + { + "bbox": [ + 136, + 445, + 471, + 653 + ], + "lines": [ + { + "bbox": [ + 136, + 445, + 471, + 653 + ], + "spans": [ + { + "bbox": [ + 136, + 445, + 471, + 653 + ], + "type": "image", + "image_path": "e9b0504f3305e06d140af96f6c0e0d1ce952c56b2f03e24d6adcb32b50b7eb16.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 657, + 504, + 681 + ], + "lines": [ + { + "bbox": [ + 104, + 657, + 504, + 681 + ], + "spans": [ + { + "bbox": [ + 104, + 657, + 504, + 681 + ], + "type": "text", + "content": "Figure 1: BitNet b1.58 2B4T advances the Pareto frontier defined by leading open-weight LLMs under 3B parameters in terms of performance versus memory, demonstrating superior efficiency." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 97, + 441, + 118 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 97, + 441, + 118 + ], + "spans": [ + { + "bbox": [ + 170, + 97, + 441, + 118 + ], + "type": "text", + "content": "BitNet b1.58 2B4T Technical Report" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 163, + 159, + 449, + 193 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 163, + 159, + 449, + 193 + ], + "spans": [ + { + "bbox": [ + 163, + 159, + 449, + 193 + ], + "type": "text", + "content": "Shuming Ma* Hongyu Wang* Shaohan Huang Xingxing Zhang Ying Hu Ting Song Yan Xia Furu Wei https://aka.ms/GeneralAI" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 691, + 504, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 691, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 691, + 504, + 723 + ], + "type": "text", + "content": "* Equal contribution. ⋆ Corresponding author. S. Ma, S. Huang, X. Zhang, T. Song, Y. Xia and F. Wei are with Microsoft Research. H. Wang is with University of Chinese Academy of Sciences. Y. Hu is with Tsinghua University." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 14, + 210, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 210, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 210, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.12285v2 [cs.CL] 25 Apr 2025" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 71, + 191, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 71, + 191, + 83 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 191, + 83 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 95, + 506, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 95, + 506, + 184 + ], + "spans": [ + { + "bbox": [ + 104, + 95, + 506, + 184 + ], + "type": "text", + "content": "Open-source large language models (LLMs) have become pivotal in democratizing access to advanced AI capabilities, fostering innovation, and enabling research across diverse fields such as natural language processing, code generation, and vision computing (Dubey et al., 2024; Yang et al., 2024; Bai et al., 2025). Their public availability allows for widespread experimentation and adaptation. However, a significant barrier hinders their broader adoption: the substantial computational resources required for deployment and inference. State-of-the-art open LLMs typically require large memory footprints, consume considerable energy, and exhibit notable inference latency, rendering them impractical for many edge devices, resource-constrained environments, and real-time applications." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 188, + 506, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 188, + 506, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 188, + 506, + 310 + ], + "type": "text", + "content": "1-bit LLMs, representing an extreme yet promising form of model quantization where weights and potentially activations are constrained to binary " + }, + { + "bbox": [ + 104, + 188, + 506, + 310 + ], + "type": "inline_equation", + "content": "\\{-1, +1\\}" + }, + { + "bbox": [ + 104, + 188, + 506, + 310 + ], + "type": "text", + "content": " or ternary " + }, + { + "bbox": [ + 104, + 188, + 506, + 310 + ], + "type": "inline_equation", + "content": "\\{-1, 0, +1\\}" + }, + { + "bbox": [ + 104, + 188, + 506, + 310 + ], + "type": "text", + "content": ", offer a compelling solution to the efficiency challenges. By drastically reducing the memory required to store weights and enabling highly efficient bitwise computations, they have the potential to significantly lower deployment costs, reduce energy consumption, and accelerate inference speeds. While prior work has explored 1-bit models, existing open efforts often fall into two categories: 1) post-training quantization (PTQ) methods applied to pre-trained full-precision models, which can lead to significant performance degradation (Xu et al., 2024b; Team, 2024), or 2) native 1-bit models (trained from scratch with 1-bit weights) that have been developed at relatively smaller scales (e.g., OLMo-Bitnet-1B²]) and may not yet match the capabilities of larger, full-precision counterparts. This performance gap has limited the practical impact of 1-bit LLMs thus far." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 314, + 504, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 314, + 504, + 381 + ], + "spans": [ + { + "bbox": [ + 104, + 314, + 504, + 381 + ], + "type": "text", + "content": "To bridge this gap between efficiency and performance, we introduce BitNet b1.58 2B4T, the first open-source, native 1-bit LLM trained at scale. This model, comprising 2 billion parameters, was trained from scratch on a substantial dataset of 4 trillion tokens, leveraging architectural and training innovations specific to the 1-bit paradigm. The core contribution of this work is to demonstrate that a native 1-bit LLM, when trained effectively at scale, can achieve performance comparable to leading open-weight, full-precision models of similar size across a wide range of tasks." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 384, + 505, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 384, + 505, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 384, + 505, + 475 + ], + "type": "text", + "content": "This technical report details the development and evaluation of BitNet b1.58 2B4T. We describe the architecture and training methodology, and then present comprehensive evaluation results on standard benchmarks assessing language understanding, mathematical reasoning, coding proficiency, and multi-turn conversational abilities. Our findings confirm its strong performance relative to established full-precision baselines, coupled with significant advantages in efficiency. 
Finally, we announce the public release of the BitNet b1.58 2B4T model weights via Hugging Face and provide open-source inference code optimized for both GPU and CPU execution, aiming to facilitate further research and the practical deployment of highly efficient LLMs." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 489, + 192, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 489, + 192, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 489, + 192, + 502 + ], + "type": "text", + "content": "2 Architecture" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 514, + 506, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 514, + 506, + 548 + ], + "spans": [ + { + "bbox": [ + 104, + 514, + 506, + 548 + ], + "type": "text", + "content": "The architecture of BitNet b1.58 2B4T is derived from the standard Transformer model (Vaswani et al., 2017), incorporating significant modifications based on the BitNet framework (Wang et al., 2023a; Ma et al., 2024). The model is trained entirely from scratch." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 552, + 504, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 552, + 504, + 586 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 504, + 586 + ], + "type": "text", + "content": "The core architectural innovation lies in replacing the standard full-precision linear layers (torch(nn.Linear) with custom BitLinear layers. This constitutes the foundation of the BitNet approach. Within these BitLinear layers:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 594, + 504, + 701 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 132, + 594, + 504, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 594, + 504, + 638 + ], + "spans": [ + { + "bbox": [ + 132, + 594, + 504, + 638 + ], + "type": "text", + "content": "- Weight Quantization: Model weights are quantized to 1.58 bits during the forward pass. This is achieved using an absolute mean (absmean) quantization scheme, which maps weights to ternary values " + }, + { + "bbox": [ + 132, + 594, + 504, + 638 + ], + "type": "inline_equation", + "content": "\\{-1,0, + 1\\}" + }, + { + "bbox": [ + 132, + 594, + 504, + 638 + ], + "type": "text", + "content": ". This drastically reduces the model size and enables efficient mathematical operations." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 643, + 504, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 643, + 504, + 675 + ], + "spans": [ + { + "bbox": [ + 132, + 643, + 504, + 675 + ], + "type": "text", + "content": "- Activation Quantization: Activations flowing through the linear projection are quantized to 8-bit integers. This employs an absolute maximum (absmax) quantization strategy, applied per-token." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 679, + 504, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 679, + 504, + 701 + ], + "spans": [ + { + "bbox": [ + 132, + 679, + 504, + 701 + ], + "type": "text", + "content": "- Normalization: We incorporate subln normalization (Wang et al., 2022) to further enhance training stability, which can be particularly beneficial in quantized training regimes." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 118, + 710, + 361, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 710, + 361, + 722 + ], + "spans": [ + { + "bbox": [ + 118, + 710, + 361, + 722 + ], + "type": "text", + "content": "2https://huggingface.co/NousResearch/OLMo-Bitnet-1B" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 504, + 95 + ], + "type": "text", + "content": "Beyond the BitLinear layers, several established LLM techniques are integrated to enhance performance and stability:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 132, + 104, + 503, + 211 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 132, + 104, + 503, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 104, + 503, + 146 + ], + "spans": [ + { + "bbox": [ + 132, + 104, + 503, + 146 + ], + "type": "text", + "content": "- Activation Function (FFN): Within the feed-forward network (FFN) sub-layers, instead of the commonly used SwiGLU activation (Shazeer, 2020), BitNet b1.58 2B4T employs squared ReLU " + }, + { + "bbox": [ + 132, + 104, + 503, + 146 + ], + "type": "inline_equation", + "content": "(\\mathrm{ReLU}^2)" + }, + { + "bbox": [ + 132, + 104, + 503, + 146 + ], + "type": "text", + "content": ". This choice is motivated by its potential to improve model sparsity and computational characteristics within the 1-bit context (Wang et al., 2024b,a)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 132, + 152, + 503, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 152, + 503, + 174 + ], + "spans": [ + { + "bbox": [ + 132, + 152, + 503, + 174 + ], + "type": "text", + "content": "- **Positional Embeddings:** Rotary Position Embeddings (RoPE) (Su et al., 2024) are used to inject positional information, a standard practice in modern high-performance LLMs." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 179, + 503, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 179, + 503, + 211 + ], + "spans": [ + { + "bbox": [ + 132, + 179, + 503, + 211 + ], + "type": "text", + "content": "- Bias Removal: Consistent with architectures like LLaMA, all bias terms are removed from the linear layers and normalization layers throughout the network, reducing parameter count and potentially simplifying quantization." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 222, + 504, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 222, + 504, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 222, + 504, + 266 + ], + "type": "text", + "content": "For tokenization, we adopt the tokenizer developed for LLaMA 3 (Dubey et al., 2024). This tokenizer implements a byte-level Byte-Pair Encoding (BPE) scheme with a vocabulary size of 128,256 tokens. 
This choice ensures robust handling of diverse text and code, and its widespread adoption facilitates straightforward integration with existing open-source tooling and ecosystems." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 283, + 171, + 297 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 283, + 171, + 297 + ], + "spans": [ + { + "bbox": [ + 105, + 283, + 171, + 297 + ], + "type": "text", + "content": "3 Training" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 308, + 504, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 308, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 504, + 384 + ], + "type": "text", + "content": "The training process for BitNet b1.58 2B4T involved three distinct phases: large-scale pre-training followed by supervised fine-tuning (SFT) and direct preference optimization (DPO). While advanced techniques like Proximal Policy Optimization (PPO) or Group Relative Policy Optimization (GRPO) can further enhance capabilities such as mathematics and chain-of-thought reasoning (Schulman et al., 2017; Shao et al., 2024), the current version of BitNet b1.58 2B4T relies solely on pre-training, SFT, and DPO. The exploration of reinforcement learning methods remains a direction for future work." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 399, + 184, + 411 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 399, + 184, + 411 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 184, + 411 + ], + "type": "text", + "content": "3.1 Pre-training" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 419, + 504, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 419, + 504, + 453 + ], + "spans": [ + { + "bbox": [ + 104, + 419, + 504, + 453 + ], + "type": "text", + "content": "The pre-training phase aimed to imbue the model with broad world knowledge and foundational language capabilities. We adapted general training strategies from established LLM practices (Dubey et al., 2024), with specific adjustments tailored for the 1-bit architecture." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 465, + 241, + 477 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 465, + 241, + 477 + ], + "spans": [ + { + "bbox": [ + 105, + 465, + 241, + 477 + ], + "type": "text", + "content": "3.1.1 Learning Rate Schedule" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 485, + 308, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 485, + 308, + 497 + ], + "spans": [ + { + "bbox": [ + 105, + 485, + 308, + 497 + ], + "type": "text", + "content": "A two-stage learning rate schedule was employed." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 127, + 506, + 503, + 596 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 129, + 506, + 503, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 506, + 503, + 548 + ], + "spans": [ + { + "bbox": [ + 129, + 506, + 503, + 548 + ], + "type": "text", + "content": "1. **Stage 1 (High Learning Rate):** The initial phase utilized a standard cosine decay schedule but commenced with a relatively high peak learning rate. This decision was informed by the observation that 1-bit models often exhibit greater training stability compared to their full-precision counterparts, allowing for more aggressive initial learning steps." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 127, + 554, + 503, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 554, + 503, + 596 + ], + "spans": [ + { + "bbox": [ + 127, + 554, + 503, + 596 + ], + "type": "text", + "content": "2. **Stage 2 (Cooldown):** Approximately midway through the planned training token count, the learning rate was abruptly decayed and subsequently maintained via a cosine schedule with a significantly lower peak value. This \"cooldown\" phase allows the model to refine its representations on higher-quality data (see Section 3.1.3)." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 611, + 238, + 622 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 611, + 238, + 622 + ], + "spans": [ + { + "bbox": [ + 105, + 611, + 238, + 622 + ], + "type": "text", + "content": "3.1.2 Weight Decay Schedule" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 630, + 503, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 630, + 503, + 642 + ], + "spans": [ + { + "bbox": [ + 105, + 630, + 503, + 642 + ], + "type": "text", + "content": "Complementing the learning rate adjustments, a two-stage weight decay strategy was implemented." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 127, + 651, + 503, + 719 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 129, + 651, + 503, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 651, + 503, + 681 + ], + "spans": [ + { + "bbox": [ + 129, + 651, + 503, + 681 + ], + "type": "text", + "content": "1. **Stage 1:** During the first training stage, weight decay followed a cosine schedule, reaching a peak value of 0.1. This regularization helps prevent overfitting during the initial high learning-rate phase." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 127, + 689, + 503, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 689, + 503, + 719 + ], + "spans": [ + { + "bbox": [ + 127, + 689, + 503, + 719 + ], + "type": "text", + "content": "2. **Stage 2:** In the second stage, weight decay was effectively disabled (set to zero). This allows the model parameters to settle into finer-grained optima guided by the lower learning rate and curated data." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 214, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 214, + 84 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 214, + 84 + ], + "type": "text", + "content": "3.1.3 Pre-training Data" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 91, + 504, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 91, + 504, + 158 + ], + "spans": [ + { + "bbox": [ + 104, + 91, + 504, + 158 + ], + "type": "text", + "content": "The pre-training corpus comprised a mixture of publicly available text and code datasets, including large web crawls like DCLM (Li et al., 2024b) and educational web pages like FineWeb-EDU (Penedo et al., 2024). To enhance mathematical reasoning abilities, we also incorporated synthetically generated mathematical data. The data presentation strategy aligned with the two-stage training: the bulk of general web data was processed during Stage 1, while higher-quality curated datasets were emphasized during the Stage 2 cooldown phase, coinciding with the reduced learning rate." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 170, + 257, + 182 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 170, + 257, + 182 + ], + "spans": [ + { + "bbox": [ + 105, + 170, + 257, + 182 + ], + "type": "text", + "content": "3.2 Supervised Fine-tuning (SFT)" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 190, + 506, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 190, + 506, + 213 + ], + "spans": [ + { + "bbox": [ + 104, + 190, + 506, + 213 + ], + "type": "text", + "content": "Following pre-training, the model underwent supervised fine-tuning (SFT) to enhance its instruction-following capabilities and improve its performance in conversational interaction formats." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 224, + 180, + 236 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 224, + 180, + 236 + ], + "spans": [ + { + "bbox": [ + 105, + 224, + 180, + 236 + ], + "type": "text", + "content": "3.2.1 SFT Data" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 243, + 506, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 243, + 506, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 506, + 310 + ], + "type": "text", + "content": "The SFT phase utilized a diverse collection of publicly available instruction-following and conversational datasets. These included, but were not limited to, WildChat (Zhao et al., 2024), LMSYS-Chat1M (Zheng et al., 2024), WizardLM Evol-Instruct (Xu et al., 2024a), and SlimOrca (Lian et al., 2023). To further bolster specific capabilities, particularly in reasoning and complex instruction adherence, we supplemented these with synthetic datasets generated using methodologies like GLAN (Li et al., 2024a) and MathScale (Tang et al., 2024)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 321, + 202, + 333 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 321, + 202, + 333 + ], + "spans": [ + { + "bbox": [ + 105, + 321, + 202, + 333 + ], + "type": "text", + "content": "3.2.2 Chat Template" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 340, + 506, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 340, + 506, + 363 + ], + "spans": [ + { + "bbox": [ + 104, + 340, + 506, + 363 + ], + "type": "text", + "content": "For conversational tasks during SFT and inference, the following chat template structure was employed:" + } + ] + } + ], + "index": 7 + }, + { + "type": "code", + "bbox": [ + 104, + 371, + 375, + 427 + ], + "blocks": [ + { + "bbox": [ + 104, + 371, + 375, + 427 + ], + "lines": [ + { + "bbox": [ + 104, + 371, + 375, + 427 + ], + "spans": [ + { + "bbox": [ + 104, + 371, + 375, + 427 + ], + "type": "text", + "content": "<|begin_of_text|>System: {system_message}<|eot_id|>\nUser: {user_message_1}<|eot_id|\nAssistant: {assistant_message_1}<|eot_id|\nUser: {user_message_2}<|eot_id|\nAssistant: {assistant_message_2}<|eot_id|..." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "code_body" + } + ], + "index": 8, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 105, + 438, + 227, + 450 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 438, + 227, + 450 + ], + "spans": [ + { + "bbox": [ + 105, + 438, + 227, + 450 + ], + "type": "text", + "content": "3.2.3 Optimization Details" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 457, + 312, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 457, + 312, + 469 + ], + "spans": [ + { + "bbox": [ + 105, + 457, + 312, + 469 + ], + "type": "text", + "content": "Several optimization choices were key during SFT:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 478, + 505, + 570 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 132, + 478, + 504, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 478, + 504, + 512 + ], + "spans": [ + { + "bbox": [ + 132, + 478, + 504, + 512 + ], + "type": "text", + "content": "- Loss Aggregation: Instead of averaging the cross-entropy loss across tokens within a batch (mean reduction), we employed summation. Empirically, we observed that summing the losses led to improved convergence and better final performance for this model." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 514, + 505, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 514, + 505, + 570 + ], + "spans": [ + { + "bbox": [ + 132, + 514, + 505, + 570 + ], + "type": "text", + "content": "- Hyperparameter Tuning: Careful tuning of the learning rate and the number of training epochs was performed. Consistent with our pre-training findings, the 1-bit model benefited from a relatively larger learning rate during SFT compared to typical full-precision model fine-tuning. Furthermore, achieving optimal convergence required extending the fine-tuning duration over a larger number of epochs than full-precision models of similar size." 
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 582, + 294, + 595 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 582, + 294, + 595 + ], + "spans": [ + { + "bbox": [ + 105, + 582, + 294, + 595 + ], + "type": "text", + "content": "3.3 Direct Preference Optimization (DPO)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 603, + 505, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 603, + 505, + 669 + ], + "spans": [ + { + "bbox": [ + 104, + 603, + 505, + 669 + ], + "type": "text", + "content": "To further align the model's behavior with human preferences regarding helpfulness and safety, we applied Direct Preference Optimization (DPO) (Rafailov et al., 2023) following the SFT phase. DPO offers an efficient alternative to traditional RLHF by directly optimizing the language model using preference data, thereby circumventing the need to train a separate reward model. This DPO stage served to refine the model's conversational prowess and overall alignment with desired interaction patterns in practical use cases." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 681, + 198, + 693 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 681, + 198, + 693 + ], + "spans": [ + { + "bbox": [ + 105, + 681, + 198, + 693 + ], + "type": "text", + "content": "3.3.1 Training Data" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "text", + "content": "The preference dataset used for DPO training was constructed from a combination of publicly available resources recognized for capturing diverse human judgments on model outputs. Specifically," + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": "we utilized UltraFeedback (Cui et al., 2024) and MagPie (Xu et al., 2024c). The aggregation of these datasets provided a robust and multifaceted preference signal, guiding the model towards generating responses more aligned with human expectations." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 117, + 208, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 117, + 208, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 117, + 208, + 129 + ], + "type": "text", + "content": "3.3.2 Training Details" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 135, + 506, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 135, + 506, + 202 + ], + "spans": [ + { + "bbox": [ + 104, + 135, + 506, + 202 + ], + "type": "text", + "content": "The DPO training phase was conducted for 2 epochs. 
We employed a learning rate of " + }, + { + "bbox": [ + 104, + 135, + 506, + 202 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-7}" + }, + { + "bbox": [ + 104, + 135, + 506, + 202 + ], + "type": "text", + "content": " and set the DPO beta parameter, which controls the divergence from the reference policy, to 0.1. To enhance training efficiency during this phase, we integrated optimized kernels from the Liger Kernel library (Hsu et al., 2024). Qualitatively, our observations indicate that the DPO process effectively steered the model towards preferred response styles without inducing significant degradation in the core capabilities established during pre-training and SFT." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 217, + 182, + 229 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 217, + 182, + 229 + ], + "spans": [ + { + "bbox": [ + 105, + 217, + 182, + 229 + ], + "type": "text", + "content": "4 Evaluation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 240, + 428, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 240, + 428, + 253 + ], + "spans": [ + { + "bbox": [ + 104, + 240, + 428, + 253 + ], + "type": "text", + "content": "We measure performance on a wide variety of benchmarks classified as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 261, + 504, + 384 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 132, + 261, + 504, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 261, + 504, + 304 + ], + "spans": [ + { + "bbox": [ + 132, + 261, + 504, + 304 + ], + "type": "text", + "content": "- Language understanding and reasoning: ARC-Easy (Yadav et al., 2019), ARC-Challenge (Yadav et al., 2019), HellaSwag (Zellers et al., 2019), WinoGrande (Sakaguchi et al., 2020), PIQA (Bisk et al., 2019), OpenbookQA (Mihaylov et al., 2018), and CommonsenseQA (Talmor et al., 2019)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 308, + 495, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 308, + 495, + 319 + ], + "spans": [ + { + "bbox": [ + 132, + 308, + 495, + 319 + ], + "type": "text", + "content": "- World knowledge: TruthfulQA (Lin et al., 2022) and MMLU (Hendrycks et al., 2021a)" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 322, + 492, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 322, + 492, + 335 + ], + "spans": [ + { + "bbox": [ + 132, + 322, + 492, + 335 + ], + "type": "text", + "content": "- Reading comprehension: TriviaQA (Joshi et al., 2017) and BoolQ (Clark et al., 2019)" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 337, + 504, + 359 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 337, + 504, + 359 + ], + "spans": [ + { + "bbox": [ + 132, + 337, + 504, + 359 + ], + "type": "text", + "content": "- Math and code: GSM8K (Cobbe et al., 2021), MATH-500 (Hendrycks et al., 2021b) and HumanEval+ (Liu et al., 2023)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 362, + 504, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 362, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 132, + 362, + 504, + 384 + ], + "type": "text", + "content": "- Instruction following and conversation: IFEval (Zhou et al., 2023) and MT-bench (Zheng et al., 2023)" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 393, + 
506, + 460 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 393, + 506, + 460 + ], + "spans": [ + { + "bbox": [ + 104, + 393, + 506, + 460 + ], + "type": "text", + "content": "We compare BitNet b1.58 2B4T with leading open-weight full precision LLMs of similar size, including LLaMA 3.2 1B (Dubey et al., 2024), Gemma-3 1B (Team et al., 2025), Qwen2.5 1.5B (Yang et al., 2024), SmolLM2 1.7B (Allal et al., 2025) and MiniCPM 2B (Hu et al., 2024). All models are instruction-tuned versions. We re-run all benchmarks with a public evaluation pipeline for a fair comparison. More evaluation details are available at the appendix. The main results are presented in Table 1." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 472, + 187, + 483 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 472, + 187, + 483 + ], + "spans": [ + { + "bbox": [ + 105, + 472, + 187, + 483 + ], + "type": "text", + "content": "4.1 Main Results" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 491, + 506, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 491, + 506, + 547 + ], + "spans": [ + { + "bbox": [ + 104, + 491, + 506, + 547 + ], + "type": "text", + "content": "As shown in Table 1, BitNet b1.58 2B4T demonstrates remarkable resource efficiency. Its non-embedding memory footprint and estimated energy consumption (Horowitz, 2014; Zhang et al., 2022) during decoding are substantially lower compared to all the full-precision models evaluated, highlighting a significant advantage in operational cost and deployability on resource-constrained devices." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 552, + 506, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 552, + 506, + 629 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 506, + 629 + ], + "type": "text", + "content": "In terms of task performance, BitNet b1.58 2B4T proves highly competitive. It achieves the best results among the compared models on several benchmarks spanning reasoning, knowledge, and math capabilities. On other benchmarks, its performance is closely comparable to the top-performing full-precision models. While some full-precision models show slight advantages on specific tasks or the overall average, BitNet b1.58 2B4T delivers strong performance across the board. The results indicate that BitNet b1.58 2B4T achieves capabilities nearly on par with leading models in its size class while offering dramatically improved efficiency." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 641, + 344, + 654 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 641, + 344, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 641, + 344, + 654 + ], + "type": "text", + "content": "4.2 Comparison with Post-training Quantized Models" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 661, + 505, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 661, + 505, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 661, + 505, + 696 + ], + "type": "text", + "content": "We further investigate the efficiency-performance trade-off by comparing BitNet b1.58 2B4T against post-training quantized (PTQ) versions of a leading competitor, Qwen2.5 1.5B, using standard INT4 methods (GPTQ and AWQ). The results are summarized in Table 2." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 699, + 507, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 699, + 507, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 699, + 507, + 723 + ], + "type": "text", + "content": "While INT4 quantization successfully reduces the memory footprint of the full-precision model, BitNet b1.58 2B4T achieves an even lower memory requirement due to its native 1-bit architecture." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 111, + 69, + 497, + 533 + ], + "blocks": [ + { + "bbox": [ + 111, + 69, + 497, + 533 + ], + "lines": [ + { + "bbox": [ + 111, + 69, + 497, + 533 + ], + "spans": [ + { + "bbox": [ + 111, + 69, + 497, + 533 + ], + "type": "table", + "html": "
<table><tr><td>Benchmark (Metric)</td><td>LLaMA 3.2 1B</td><td>Gemma-3 1B</td><td>Qwen2.5 1.5B</td><td>SmolLM2 1.7B</td><td>MiniCPM 2B</td><td>BitNet b1.58 2B</td></tr>
<tr><td>Memory (Non-emb)</td><td>2GB</td><td>1.4GB</td><td>2.6GB</td><td>3.2GB</td><td>4.8GB</td><td>0.4GB</td></tr>
<tr><td>Latency (CPU; TPOT)</td><td>48ms</td><td>41ms</td><td>65ms</td><td>67ms</td><td>124ms</td><td>29ms</td></tr>
<tr><td>Energy (Estimated)</td><td>0.258J</td><td>0.186J</td><td>0.347J</td><td>0.425J</td><td>0.649J</td><td>0.028J</td></tr>
<tr><td>Training Tokens (Pre-training)</td><td>9T (pruning & distillation)</td><td>2T (distillation)</td><td>18T</td><td>11T</td><td>1.1T</td><td>4T</td></tr>
<tr><td>ARC-Challenge (0-shot; Acc,norm)</td><td>37.80</td><td>38.40</td><td>46.67</td><td>43.52</td><td>44.80</td><td>49.91</td></tr>
<tr><td>ARC-Easy (0-shot; Acc,norm)</td><td>63.17</td><td>63.13</td><td>76.01</td><td>62.92</td><td>72.14</td><td>74.79</td></tr>
<tr><td>OpenbookQA (0-shot; Acc,norm)</td><td>34.80</td><td>38.80</td><td>40.80</td><td>46.00</td><td>40.20</td><td>41.60</td></tr>
<tr><td>BoolQ (0-shot; Acc)</td><td>64.65</td><td>74.22</td><td>78.04</td><td>75.78</td><td>80.67</td><td>80.18</td></tr>
<tr><td>HellaSwag (0-shot; Acc,norm)</td><td>60.80</td><td>57.69</td><td>68.28</td><td>71.71</td><td>70.81</td><td>68.44</td></tr>
<tr><td>PIQA (0-shot; Acc,norm)</td><td>74.21</td><td>71.93</td><td>76.12</td><td>76.12</td><td>76.66</td><td>77.09</td></tr>
<tr><td>WinoGrande (0-shot; Acc)</td><td>59.51</td><td>58.48</td><td>62.83</td><td>68.98</td><td>61.80</td><td>71.90</td></tr>
<tr><td>CommonsenseQA (10-shot; Acc)</td><td>58.48</td><td>42.10</td><td>76.41</td><td>63.55</td><td>71.74</td><td>71.58</td></tr>
<tr><td>TruthfulQA (10-shot; MC2)</td><td>43.80</td><td>38.66</td><td>46.67</td><td>39.90</td><td>41.41</td><td>45.31</td></tr>
<tr><td>TriviaQA (5-shot; EM)</td><td>37.60</td><td>23.49</td><td>38.37</td><td>45.97</td><td>34.13</td><td>33.57</td></tr>
<tr><td>MMLU (5-shot; Acc)</td><td>45.58</td><td>39.91</td><td>60.25</td><td>49.24</td><td>51.82</td><td>53.17</td></tr>
<tr><td>HumanEval+ (0-shot; Pass@1)</td><td>31.10</td><td>37.20</td><td>50.60</td><td>28.00</td><td>43.90</td><td>38.40</td></tr>
<tr><td>GSM8K (4-shot; EM)</td><td>38.21</td><td>31.16</td><td>56.79</td><td>45.11</td><td>4.40</td><td>58.38</td></tr>
<tr><td>MATH-500 (0-shot; EM)</td><td>23.00</td><td>42.00</td><td>53.00</td><td>17.60</td><td>14.80</td><td>43.40</td></tr>
<tr><td>IFEval (0-shot; Instruct-Strict)</td><td>62.71</td><td>66.67</td><td>50.12</td><td>57.91</td><td>36.81</td><td>53.48</td></tr>
<tr><td>MT-bench (0-shot; Average)</td><td>5.43</td><td>6.40</td><td>6.12</td><td>5.50</td><td>6.57</td><td>5.85</td></tr>
<tr><td>Average</td><td>44.90</td><td>43.74</td><td>55.23</td><td>48.70</td><td>42.05</td><td>54.19</td></tr></table>
", + "image_path": "571146886c535edf30d81d1772d84f416f8ac854969e5314285b8b400728c4d3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 536, + 504, + 570 + ], + "lines": [ + { + "bbox": [ + 104, + 536, + 504, + 570 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 504, + 570 + ], + "type": "text", + "content": "Table 1: Comparison of BitNet b1.58 2B4T with leading open-weight full-precision LLMs of similar size (1B-2B parameters) on efficiency metrics and performance across a wide range of benchmarks. All models compared are instruction-tuned versions." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 644, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 644, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 644, + 504, + 723 + ], + "type": "text", + "content": "More importantly, this superior memory efficiency does not compromise performance relative to the quantized models. Standard PTQ techniques lead to a noticeable degradation in performance compared to the original full-precision model. In contrast, BitNet b1.58 2B4T maintains stronger overall performance than the INT4 quantized versions of Qwen2.5-1.5B on the evaluated benchmarks. This comparison suggests that BitNet b1.58 2B4T represents a more favorable point on the efficiency-performance curve than applying conventional INT4 PTQ to existing architectures, offering better performance with lower resource usage." + } + ] + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 125, + 70, + 484, + 224 + ], + "blocks": [ + { + "bbox": [ + 125, + 70, + 484, + 224 + ], + "lines": [ + { + "bbox": [ + 125, + 70, + 484, + 224 + ], + "spans": [ + { + "bbox": [ + 125, + 70, + 484, + 224 + ], + "type": "table", + "html": "
<table><tr><td rowspan="2">Benchmark (Metric)</td><td colspan="3">Qwen2.5</td><td>BitNet b1.58</td></tr>
<tr><td>1.5B-bf16</td><td>1.5B-GPTQ-int4</td><td>1.5B-AWQ-int4</td><td>2B</td></tr>
<tr><td>Memory (Non-emb)</td><td>2.6GB</td><td>0.7GB</td><td>0.7GB</td><td>0.4GB</td></tr>
<tr><td>Activation</td><td>bf16</td><td>bf16</td><td>bf16</td><td>int8</td></tr>
<tr><td>MMLU (5-shot; Acc)</td><td>60.25</td><td>58.06</td><td>57.43</td><td>53.17</td></tr>
<tr><td>GSM8K (4-shot; EM)</td><td>56.79</td><td>50.57</td><td>50.64</td><td>58.38</td></tr>
<tr><td>IFEval (0-shot; Instruct-Strict)</td><td>50.12</td><td>47.84</td><td>45.44</td><td>53.48</td></tr>
<tr><td>Average</td><td>55.72</td><td>52.15</td><td>51.17</td><td>55.01</td></tr></table>
", + "image_path": "ef5eaeee5358d095388e3666899372dcd05f08f5db7a5f88b8a1fcf76af24244.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 118, + 281, + 491, + 564 + ], + "blocks": [ + { + "bbox": [ + 104, + 228, + 506, + 264 + ], + "lines": [ + { + "bbox": [ + 104, + 228, + 506, + 264 + ], + "spans": [ + { + "bbox": [ + 104, + 228, + 506, + 264 + ], + "type": "text", + "content": "Table 2: Comparison of BitNet b1.58 (2B) against Qwen2.5 1.5B in its original bf16 precision and after INT4 post-training quantization (GPTQ and AWQ). All models shown are based on instruction-tuned checkpoints." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 118, + 281, + 491, + 564 + ], + "lines": [ + { + "bbox": [ + 118, + 281, + 491, + 564 + ], + "spans": [ + { + "bbox": [ + 118, + 281, + 491, + 564 + ], + "type": "table", + "html": "
<table><tr><td>Benchmark (Metric)</td><td>Bonsai 0.5B</td><td>OLMo-Bitnet 1B</td><td>Falcon3-1.58bit 7B</td><td>Llama3-8B-1.58 8B</td><td>BitNet b1.58 2B</td></tr>
<tr><td>Native 1-bit</td><td>✓</td><td>✓</td><td>✗</td><td>✗</td><td>✓</td></tr>
<tr><td>ARC-Challenge (0-shot; Acc,norm)</td><td>33.19</td><td>26.54</td><td>37.80</td><td>43.69</td><td>49.91</td></tr>
<tr><td>ARC-Easy (0-shot; Acc,norm)</td><td>58.25</td><td>25.38</td><td>65.03</td><td>70.71</td><td>74.79</td></tr>
<tr><td>OpenbookQA (0-shot; Acc,norm)</td><td>33.60</td><td>28.20</td><td>38.20</td><td>37.20</td><td>41.60</td></tr>
<tr><td>BoolQ (0-shot; Acc)</td><td>58.44</td><td>52.48</td><td>72.14</td><td>68.38</td><td>80.18</td></tr>
<tr><td>HellaSwag (0-shot; Acc,norm)</td><td>48.01</td><td>25.88</td><td>59.46</td><td>68.56</td><td>68.44</td></tr>
<tr><td>PIQA (0-shot; Acc,norm)</td><td>70.02</td><td>50.49</td><td>72.36</td><td>75.30</td><td>77.09</td></tr>
<tr><td>WinoGrande (0-shot; Acc)</td><td>54.46</td><td>51.54</td><td>60.14</td><td>60.93</td><td>71.90</td></tr>
<tr><td>CommonsenseQA (10-shot; Acc)</td><td>18.43</td><td>19.49</td><td>67.08</td><td>28.50</td><td>71.58</td></tr>
<tr><td>TruthfulQA (10-shot; MC2)</td><td>40.65</td><td>49.05</td><td>43.29</td><td>39.13</td><td>45.31</td></tr>
<tr><td>TriviaQA (5-shot; EM)</td><td>10.84</td><td>0.00</td><td>0.00</td><td>19.82</td><td>33.57</td></tr>
<tr><td>MMLU (5-shot; Acc)</td><td>25.74</td><td>25.47</td><td>42.79</td><td>35.04</td><td>53.17</td></tr>
<tr><td>Average</td><td>41.06</td><td>32.22</td><td>50.76</td><td>49.75</td><td>60.68</td></tr></table>
", + "image_path": "7fdcbcc3b50ac408ac7c07af7c01e1a337e1a44a092a38fa5c43f53314bca52d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 567, + 507, + 602 + ], + "lines": [ + { + "bbox": [ + 104, + 567, + 507, + 602 + ], + "spans": [ + { + "bbox": [ + 104, + 567, + 507, + 602 + ], + "type": "text", + "content": "Table 3: Performance comparison of BitNet b1.58 2B4T against other open-weight 1-bit models. This includes natively trained 1-bit models (Bonsai-0.5B, OLMo-Bitnet-1B) and larger models posttraining quantized to 1.58-bit (Falcon3-1.58bit-7B, Llama3-8B-1.58)." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 629, + 319, + 642 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 629, + 319, + 642 + ], + "spans": [ + { + "bbox": [ + 104, + 629, + 319, + 642 + ], + "type": "text", + "content": "4.3 Comparison with Open-weight 1-bit Models" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 650, + 504, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 650, + 504, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 650, + 504, + 696 + ], + "type": "text", + "content": "Finally, we situate BitNet b1.58 2B4T within the landscape of other models designed for or quantized to near 1-bit precision. We compare it against natively trained 1-bit models of smaller scale and significantly larger models post-training quantized to 1.58-bit precision. The comparative results are presented in Table 3." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 700, + 507, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 507, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 507, + 723 + ], + "type": "text", + "content": "The evaluation clearly positions BitNet b1.58 2B4T as the leading model in this category. It demonstrates significantly stronger overall performance than all other compared 1-bit models, achieving" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": "the highest scores on the vast majority of benchmarks. Notably, BitNet b1.58 2B4T substantially outperforms not only the smaller, natively trained 1-bit models but also the much larger models (in terms of parameter count) that were quantized to 1-bit. This highlights the effectiveness of the native training approach employed by BitNet b1.58 2B4T, allowing it to set a new state-of-the-art performance level for models operating at this extreme level of quantization, even surpassing larger models subjected to post-training quantization." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 163, + 258, + 176 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 258, + 176 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 258, + 176 + ], + "type": "text", + "content": "5 Inference Implementation" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 193, + 506, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 193, + 506, + 259 + ], + "spans": [ + { + "bbox": [ + 104, + 193, + 506, + 259 + ], + "type": "text", + "content": "Efficient inference is crucial for deploying Large Language Models, particularly for resource-constrained environments. The unique quantization scheme of BitNet b1.58 2B4T, employing 1.58-bit weights and 8-bit activations (W1.58A8), necessitates specialized implementations, as standard deep learning libraries often lack optimized kernels for such mixed-precision, low-bit formats. To address this, we developed and open-sourced dedicated inference libraries for both GPU and CPU platforms. The code is publicly available at https://aka.ms/bitnet." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 281, + 194, + 292 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 281, + 194, + 292 + ], + "spans": [ + { + "bbox": [ + 105, + 281, + 194, + 292 + ], + "type": "text", + "content": "5.1 GPU Inference" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 304, + 504, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 304, + 504, + 360 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 504, + 360 + ], + "type": "text", + "content": "Current GPU architectures and their associated software libraries (e.g., cuBLAS, PyTorch kernels) are primarily optimized for operations involving standard data types like FP16, BF16, and INT8/INT4. Native, high-performance support for the specific W1.58A8 matrix multiplication required by BitNet b1.58 2B4T is generally unavailable. This limitation can hinder the realization of the theoretical efficiency gains offered by 1-bit models on existing hardware." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 364, + 505, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 364, + 505, + 485 + ], + "spans": [ + { + "bbox": [ + 104, + 364, + 505, + 485 + ], + "type": "text", + "content": "To enable efficient GPU inference, we developed a custom CUDA kernel specifically designed for the W1.58A8 matrix multiplication. Since ternary weights " + }, + { + "bbox": [ + 104, + 364, + 505, + 485 + ], + "type": "inline_equation", + "content": "\\{-1,0, + 1\\}" + }, + { + "bbox": [ + 104, + 364, + 505, + 485 + ], + "type": "text", + "content": ", representing 1.58 bits) cannot be stored efficiently using standard data types, we pack multiple weight values into a single 8-bit integer ('int8') for storage in High Bandwidth Memory (HBM). Specifically, four ternary values are encoded into one 'int8' value. During computation, the CUDA kernel loads the packed 'int8' weights from HBM into the GPU's faster on-chip Shared Memory (SRAM). It then unpacks these values back into a representation suitable for efficient ternary computation (e.g., reconstructing the -1, 0, +1 values) immediately before performing the matrix multiplication with the 8-bit activations. This 'pack-store-load-unpack-compute' strategy minimizes memory bandwidth usage while leveraging custom compute instructions. 
Further implementation details and optimization strategies are elaborated in the Ladder framework (Wang et al., 2023b)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 490, + 504, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 490, + 504, + 544 + ], + "spans": [ + { + "bbox": [ + 104, + 490, + 504, + 544 + ], + "type": "text", + "content": "While our custom kernel significantly improves performance compared to naive implementations, we note that current commodity GPU architectures are not optimally designed for the 1-bit models. We believe that future hardware innovations, potentially incorporating dedicated logic for low-bit operations, will be essential to fully unlock the performance and energy efficiency potential of models like BitNet b1.58." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 567, + 194, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 567, + 194, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 567, + 194, + 578 + ], + "type": "text", + "content": "5.2 CPU Inference" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 590, + 504, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 590, + 504, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 590, + 504, + 624 + ], + "type": "text", + "content": "To ensure broad accessibility and enable deployment on devices lacking powerful GPUs (e.g., edge devices, laptops, standard servers), we developed bitnet.cpp. This C++ library serves as an official reference implementation for CPU inference of 1-bit LLMs, including BitNet b1.58." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 628, + 504, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 628, + 504, + 684 + ], + "spans": [ + { + "bbox": [ + 104, + 628, + 504, + 684 + ], + "type": "text", + "content": "bitnet.cpp provides optimized kernels tailored for efficient execution on standard CPU architectures. The kernels are designed to operate efficiently with the model's specific quantization scheme, avoiding the overhead of generic quantization libraries or intricate low-level bit manipulation where possible. It processes the weight elements in a manner consistent with the BitNet b1.58 training methodology, ensuring numerical accuracy (lossless inference relative to the training procedure)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": "This approach delivers fast and accurate inference of 1.58-bit models directly on CPUs. More technical details and usage instructions can be found in the bitnet.cpp repository and associated technical report (Wang et al., 2025)." 
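To make the 'pack-store-load-unpack-compute' strategy extracted above concrete, here is a minimal NumPy sketch of the pack and unpack steps (four ternary values per int8, two bits each). The 2-bit encoding chosen here is an assumption for illustration only; the actual CUDA kernel's bit layout and its fused unpack-and-multiply path live in the released code and the Ladder framework, not in this sketch.

import numpy as np

def pack_ternary(w):
    # Map each ternary weight to a 2-bit code (0 -> 0b00, +1 -> 0b01, -1 -> 0b10;
    # a hypothetical encoding), then pack four codes into one uint8.
    codes = np.where(w < 0, 2, w).astype(np.uint8).reshape(-1, 4)
    return (codes[:, 0] | (codes[:, 1] << 2) |
            (codes[:, 2] << 4) | (codes[:, 3] << 6)).astype(np.uint8)

def unpack_ternary(packed):
    # Recover the -1/0/+1 values immediately before the matmul with int8 activations.
    codes = np.stack([(packed >> s) & 0b11 for s in (0, 2, 4, 6)], axis=1).astype(np.int8)
    return np.where(codes == 2, -1, codes).reshape(-1)

w = np.array([-1, 0, 1, 1, 0, -1, -1, 0], dtype=np.int8)  # 8 weights -> 2 packed bytes
assert np.array_equal(unpack_ternary(pack_ternary(w)), w)

A real kernel would unpack per tile in registers or shared memory rather than materializing the full weight matrix; that is the point of the strategy: weights cross the HBM boundary only in packed form.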
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 185, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 185, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 185, + 85 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 97, + 504, + 141 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 97, + 504, + 141 + ], + "spans": [ + { + "bbox": [ + 104, + 97, + 504, + 141 + ], + "type": "text", + "content": "This technical report introduced BitNet b1.58 2B4T, a significant step towards highly efficient yet capable Large Language Models. As the first open-source, native 1-bit LLM trained at the 2-billion parameter scale on 4 trillion tokens, our work demonstrates the viability of extreme quantization directly within the training process." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 146, + 506, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 146, + 506, + 236 + ], + "spans": [ + { + "bbox": [ + 104, + 146, + 506, + 236 + ], + "type": "text", + "content": "Comprehensive evaluations across benchmarks assessing language understanding, reasoning, mathematics, coding, and dialogue revealed that BitNet b1.58 2B4T achieves performance comparable to state-of-the-art open-weight, full-precision models of similar size. Crucially, this performance parity is achieved with dramatically reduced computational requirements, offering substantial savings in memory footprint, energy consumption, and inference latency. To facilitate practical use and further research, we developed and released optimized inference implementations for both GPU (via custom CUDA kernels) and CPU (via the 'bitnet.cpp' library), alongside the model weights available on Hugging Face." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 239, + 506, + 284 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 239, + 506, + 284 + ], + "spans": [ + { + "bbox": [ + 104, + 239, + 506, + 284 + ], + "type": "text", + "content": "BitNet b1.58 2B4T represents a compelling proof-of-concept that challenges the necessity of full-precision weights for achieving high performance in LLMs at scale. It opens avenues for deploying powerful language models in resource-constrained environments where previous models were prohibitive, potentially democratizing access to advanced AI capabilities." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 300, + 217, + 314 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 300, + 217, + 314 + ], + "spans": [ + { + "bbox": [ + 105, + 300, + 217, + 314 + ], + "type": "text", + "content": "7 Future Directions" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 327, + 506, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 327, + 506, + 340 + ], + "spans": [ + { + "bbox": [ + 104, + 327, + 506, + 340 + ], + "type": "text", + "content": "While BitNet b1.58 2B4T demonstrates promising results, several exciting research directions remain:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 350, + 506, + 666 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 132, + 350, + 504, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 350, + 504, + 394 + ], + "spans": [ + { + "bbox": [ + 132, + 350, + 504, + 394 + ], + "type": "text", + "content": "- Scaling Laws and Larger Models: Investigating the scaling properties of native 1-bit LLMs is crucial. Future work will explore training larger models (e.g., 7B, 13B parameters and beyond) and training on even larger datasets to understand if the performance parity with full-precision models holds." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 132, + 400, + 504, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 400, + 504, + 456 + ], + "spans": [ + { + "bbox": [ + 132, + 400, + 504, + 456 + ], + "type": "text", + "content": "- Hardware Co-Design and Optimization: The full potential of 1-bit models is likely hindered by current hardware limitations. Continued development of highly optimized kernels for existing hardware (GPUs, CPUs, NPUs) is needed. Furthermore, co-designing future hardware accelerators specifically optimized for 1-bit computations and data movement could unlock orders-of-magnitude improvements in speed and energy efficiency." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 132, + 461, + 506, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 461, + 506, + 528 + ], + "spans": [ + { + "bbox": [ + 132, + 461, + 506, + 528 + ], + "type": "text", + "content": "- Extended Sequence Length: Extending the maximum sequence length of BitNet b1.58 2B4T can process is crucial. This enhancement is vital for tasks demanding long-context understanding, such as summarizing lengthy documents or engaging in complex problem-solving, and is particularly critical for improving performance on long chain-of-thought reasoning tasks. Investigating efficient attention mechanisms suitable for low-bit models at longer sequence lengths will be key." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 533, + 506, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 533, + 506, + 567 + ], + "spans": [ + { + "bbox": [ + 132, + 533, + 506, + 567 + ], + "type": "text", + "content": "- Multilingual Capabilities: The current model is primarily trained on English-centric data. Extending the pre-training corpus and potentially adapting the architecture to effectively support multiple languages is a key direction for broader applicability." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 571, + 504, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 571, + 504, + 616 + ], + "spans": [ + { + "bbox": [ + 132, + 571, + 504, + 616 + ], + "type": "text", + "content": "- Multimodal Integration: Exploring the integration of 1-bit principles into multimodal architectures is another promising frontier. Developing efficient ways to process and fuse information from different modalities (e.g., text and images) within a low-bit framework could enable new applications." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 622, + 506, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 622, + 506, + 666 + ], + "spans": [ + { + "bbox": [ + 132, + 622, + 506, + 666 + ], + "type": "text", + "content": "- Theoretical Understanding: Delving deeper into the theoretical underpinnings of why 1-bit training at scale is effective remains an open area. Analyzing the learning dynamics, loss landscapes, and representational properties of these models could yield valuable insights for future development." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": "By pursuing these directions, we aim to further advance the capability and efficiency of 1-bit LLMs, paving the way for more sustainable and accessible artificial intelligence. The open-source release of BitNet b1.58 2B4T and its associated tools provides a foundation for the community to build upon these efforts." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 91, + 507, + 722 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 106, + 91, + 507, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 91, + 507, + 146 + ], + "spans": [ + { + "bbox": [ + 106, + 91, + 507, + 146 + ], + "type": "text", + "content": "Allal, L. B., Lozhkov, A., Bakouch, E., Blázquez, G. M., Penedo, G., Tunstall, L., Marafioti, A., Kydlíček, H., Lajarín, A. P., Srivastav, V., Lochner, J., Fahlgren, C., Nguyen, X.-S., Fourrier, C., Burtenshaw, B., Larcher, H., Zhao, H., Zakka, C., Morlon, M., Raffel, C., von Werra, L., and Wolf, T. (2025). Smollm2: When smol goes big - data-centric training of a small language model. CoRR, abs/2502.02737." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 155, + 507, + 199 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 155, + 507, + 199 + ], + "spans": [ + { + "bbox": [ + 106, + 155, + 507, + 199 + ], + "type": "text", + "content": "Bai, S., Chen, K., Liu, X., Wang, J., Ge, W., Song, S., Dang, K., Wang, P., Wang, S., Tang, J., Zhong, H., Zhu, Y., Yang, M.-H., Li, Z., Wan, J., Wang, P., Ding, W., Fu, Z., Xu, Y., Ye, J., Zhang, X., Xie, T., Cheng, Z., Zhang, H., Yang, Z., Xu, H., and Lin, J. (2025). Qwen2.5-vl technical report. CoRR, abs/2502.13923." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 208, + 505, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 208, + 505, + 232 + ], + "spans": [ + { + "bbox": [ + 106, + 208, + 505, + 232 + ], + "type": "text", + "content": "Bisk, Y., Zellers, R., Bras, R. L., Gao, J., and Choi, Y. (2019). PIQA: reasoning about physical commonsense in natural language. CoRR, abs/1911.11641." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 239, + 505, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 239, + 505, + 263 + ], + "spans": [ + { + "bbox": [ + 106, + 239, + 505, + 263 + ], + "type": "text", + "content": "Clark, C., Lee, K., Chang, M.-W., Kwiatkowski, T., Collins, M., and Toutanova, K. (2019). Boolq: Exploring the surprising difficulty of natural yes/no questions. CoRR, abs/1905.10044." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 271, + 507, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 271, + 507, + 304 + ], + "spans": [ + { + "bbox": [ + 106, + 271, + 507, + 304 + ], + "type": "text", + "content": "Cobbe, K., Kosaraju, V., Bavarian, M., Chen, M., Jun, H., Kaiser, L., Plappert, M., Tworek, J., Hilton, J., Nakano, R., Hesse, C., and Schulman, J. (2021). Training verifiers to solve math word problems. CoRR, abs/2110.14168." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 313, + 505, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 313, + 505, + 348 + ], + "spans": [ + { + "bbox": [ + 106, + 313, + 505, + 348 + ], + "type": "text", + "content": "Cui, G., Yuan, L., Ding, N., Yao, G., He, B., Zhu, W., Ni, Y., Xie, G., Xie, R., Lin, Y., Liu, Z., and Sun, M. (2024). ULTRAFEEDBACK: boosting language models with scaled AI feedback. In ICML. OpenReview.net." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 356, + 507, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 356, + 507, + 499 + ], + "spans": [ + { + "bbox": [ + 106, + 356, + 507, + 499 + ], + "type": "text", + "content": "Dubey, A., Jauhri, A., Pandey, A., Kadian, A., Al-Dahle, A., Letman, A., Mathur, A., Schelten, A., Yang, A., Fan, A., Goyal, A., Hartshorn, A., Yang, A., Mitra, A., Sravankumar, A., Korenev, A., Hinsvark, A., Rao, A., Zhang, A., Rodriguez, A., Gregerson, A., Spataru, A., Rozière, B., Biron, B., Tang, B., Chern, B., Caucheteux, C., Nayak, C., Bi, C., Marra, C., McConnell, C., Keller, C., Touret, C., Wu, C., Wong, C., Ferrer, C. C., Nikolaidis, C., Allonsius, D., Song, D., Pintz, D., Livshits, D., Esiobu, D., Choudhary, D., Mahajan, D., Garcia-Olano, D., Perino, D., Hupkes, D., Lakomkin, E., AlBadawy, E., Lobanova, E., Dinan, E., Smith, E. M., Radenovic, F., Zhang, F., Synnaeve, G., Lee, G., Anderson, G. 
L., Nail, G., Mialon, G., Pang, G., Cucurell, G., Nguyen, H., Korevaar, H., Xu, H., Touvron, H., Zarov, I., Ibarra, I. A., Kloumann, I. M., Misra, I., Evtimov, I., Copet, J., Lee, J., Geffert, J., Vranes, J., Park, J., Mahadeokar, J., Shah, J., van der Linde, J., Billock, J., Hong, J., Lee, J., Fu, J., Chi, J., Huang, J., Liu, J., Wang, J., Yu, J., Bitton, J., Spisak, J., Park, J., Rocca, J., Johnstun, J., Saxe, J., Jia, J., Alwala, K. V., Upasani, K., Plawiak, K., Li, K., Heafield, K., Stone, K., and et al. (2024). The llama 3 herd of models. CoRR, abs/2407.21783." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 507, + 507, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 507, + 507, + 541 + ], + "spans": [ + { + "bbox": [ + 106, + 507, + 507, + 541 + ], + "type": "text", + "content": "Hendrycks, D., Burns, C., Basart, S., Zou, A., Mazeika, M., Song, D., and Steinhardt, J. (2021a). Measuring massive multitask language understanding. In 9th International Conference on Learning Representations, ICLR 2021, Virtual Event, Austria, May 3-7, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 550, + 507, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 550, + 507, + 594 + ], + "spans": [ + { + "bbox": [ + 106, + 550, + 507, + 594 + ], + "type": "text", + "content": "Hendrycks, D., Burns, C., Kadavath, S., Arora, A., Basart, S., Tang, E., Song, D., and Steinhardt, J. (2021b). Measuring mathematical problem solving with the MATH dataset. In Proceedings of the Neural Information Processing Systems Track on Datasets and Benchmarks 1, NeurIPS Datasets and Benchmarks 2021, December 2021, virtual." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 603, + 507, + 637 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 603, + 507, + 637 + ], + "spans": [ + { + "bbox": [ + 106, + 603, + 507, + 637 + ], + "type": "text", + "content": "Horowitz, M. (2014). 1.1 computing's energy problem (and what we can do about it). In 2014 IEEE International Conference on Solid-State Circuits Conference, ISSCC 2014, Digest of Technical Papers, San Francisco, CA, USA, February 9-13, 2014, pages 10-14." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 646, + 507, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 646, + 507, + 669 + ], + "spans": [ + { + "bbox": [ + 106, + 646, + 507, + 669 + ], + "type": "text", + "content": "Hsu, P.-L., Dai, Y., Kothapalli, V., Song, Q., Tang, S., Zhu, S., Shimizu, S., Sahni, S., Ning, H., and Chen, Y. (2024). Liger kernel: Efficient triton kernels for LLM training. CoRR, abs/2410.10989." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 677, + 507, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 677, + 507, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 677, + 507, + 722 + ], + "type": "text", + "content": "Hu, S., Tu, Y., Han, X., He, C., Cui, G., Long, X., Zheng, Z., Fang, Y., Huang, Y., Zhao, W., Zhang, X., Thai, Z. L., Zhang, K., Wang, C., Yao, Y., Zhao, C., Zhou, J., Cai, J., Zhai, Z., Ding, N., Jia, C., Zeng, G., Li, D., Liu, Z., and Sun, M. (2024). Minicpm: Unveiling the potential of small language models with scalable training strategies. CoRR, abs/2404.06395." 
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 506, + 118 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 506, + 118 + ], + "type": "text", + "content": "Joshi, M., Choi, E., Weld, D. S., and Zettlemoyer, L. (2017). Triviaqa: A large scale distantly supervised challenge dataset for reading comprehension. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics, ACL 2017, Vancouver, Canada, July 30 - August 4, Volume 1: Long Papers, pages 1601-1611." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 123, + 506, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 123, + 506, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 123, + 506, + 168 + ], + "type": "text", + "content": "Li, H., Dong, Q., Tang, Z., Wang, C., Zhang, X., Huang, H., Huang, S., Huang, X., Huang, Z., Zhang, D., Gu, Y., Cheng, X., Wang, X., Chen, S.-Q., Dong, L., Lu, W., Sui, Z., Wang, B., Lam, W., and Wei, F. (2024a). Synthetic data (almost) from scratch: Generalized instruction tuning for language models. CoRR, abs/2402.13064." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 175, + 506, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 175, + 506, + 308 + ], + "spans": [ + { + "bbox": [ + 105, + 175, + 506, + 308 + ], + "type": "text", + "content": "Li, J., Fang, A., Smyrnis, G., Ivgi, M., Jordan, M., Gadre, S. Y., Bansal, H., Guha, E., Keh, S. S., Arora, K., Garg, S., Xin, R., Muennighoff, N., Heckel, R., Mercat, J., Chen, M. F., Gururangan, S., Wortsman, M., Albalak, A., Bitton, Y., Nezhurina, M., Abbas, A., Hsieh, C.-Y., Ghosh, D., Gardner, J., Kilian, M., Zhang, H., Shao, R., Pratt, S. M., Sanyal, S., Ilharco, G., Daras, G., Marathe, K., Gokaslan, A., Zhang, J., Chandu, K. R., Nguyen, T., Vasiljevic, I., Kakade, S. M., Song, S., Sanghavi, S., Faghri, F., Oh, S., Zettlemoyer, L., Lo, K., El-Nouby, A., Pouransari, H., Toshev, A., Wang, S., Groeneveld, D., Soldaini, L., Koh, P. W., Jitsev, J., Kollar, T., Dimakis, A., Carmon, Y., Dave, A., Schmidt, L., and Shankar, V. (2024b). Datacomp-lm: In search of the next generation of training sets for language models. In Globersons, A., Mackey, L., Belgrave, D., Fan, A., Paquet, U., Tomczak, J. M., and Zhang, C., editors, Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 314, + 506, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 314, + 506, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 314, + 506, + 338 + ], + "type": "text", + "content": "Lian, W., Wang, G., Goodson, B., Pentland, E., Cook, A., Vong, C., and \"Teknium\" (2023). 
Slimorca: An open dataset of gpt-4 augmented flan reasoning traces, with verification." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 343, + 506, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 343, + 506, + 378 + ], + "spans": [ + { + "bbox": [ + 105, + 343, + 506, + 378 + ], + "type": "text", + "content": "Lin, S., Hilton, J., and Evans, O. (2022). Truthfulqa: Measuring how models mimic human falsehoods. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2022, Dublin, Ireland, May 22-27, 2022, pages 3214-3252." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 384, + 506, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 384, + 506, + 418 + ], + "spans": [ + { + "bbox": [ + 105, + 384, + 506, + 418 + ], + "type": "text", + "content": "Liu, J., Xia, C. S., Wang, Y., and Zhang, L. (2023). Is your code generated by chatgpt really correct? rigorous evaluation of large language models for code generation. Advances in Neural Information Processing Systems, 36:21558-21572." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 425, + 506, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 425, + 506, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 425, + 506, + 449 + ], + "type": "text", + "content": "Ma, S., Wang, H., Ma, L., Wang, L., Wang, W., Huang, S., Dong, L., Wang, R., Xue, J., and Wei, F. (2024). The era of 1-bit llms: All large language models are in 1.58 bits. CoRR, abs/2402.17764." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 455, + 506, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 455, + 506, + 479 + ], + "spans": [ + { + "bbox": [ + 105, + 455, + 506, + 479 + ], + "type": "text", + "content": "Mihaylov, T., Clark, P., Khot, T., and Sabharwal, A. (2018). Can a suit of armor conduct electricity? A new dataset for open book question answering. CoRR, abs/1809.02789." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 484, + 506, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 484, + 506, + 552 + ], + "spans": [ + { + "bbox": [ + 105, + 484, + 506, + 552 + ], + "type": "text", + "content": "Penedo, G., Kydlícek, H., Allal, L. B., Lozhkov, A., Mitchell, M., Raffel, C. A., von Werra, L., and Wolf, T. (2024). The fineweb datasets: Decanting the web for the finest text data at scale. In Globersons, A., Mackey, L., Belgrave, D., Fan, A., Paquet, U., Tomczak, J. M., and Zhang, C., editors, Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 558, + 506, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 558, + 506, + 604 + ], + "spans": [ + { + "bbox": [ + 105, + 558, + 506, + 604 + ], + "type": "text", + "content": "Rafailov, R., Sharma, A., Mitchell, E., Manning, C. D., Ermon, S., and Finn, C. (2023). Direct preference optimization: Your language model is secretly a reward model. In Oh, A., Naumann, T., Globerson, A., Saenko, K., Hardt, M., and Levine, S., editors, Advances in Neural Information Processing Systems 36." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 609, + 504, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 609, + 504, + 633 + ], + "spans": [ + { + "bbox": [ + 105, + 609, + 504, + 633 + ], + "type": "text", + "content": "Sakaguchi, K., Bras, R. L., Bhagavatula, C., and Choi, Y. (2020). WinoGrande: an adversarial winograd schema challenge at scale. In AAAI, pages 8732-8740." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 639, + 504, + 663 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 639, + 504, + 663 + ], + "spans": [ + { + "bbox": [ + 105, + 639, + 504, + 663 + ], + "type": "text", + "content": "Schulman, J., Wolski, F., Dhariwal, P., Radford, A., and Klimov, O. (2017). Proximal policy optimization algorithms. CoRR, abs/1707.06347." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 669, + 506, + 703 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 669, + 506, + 703 + ], + "spans": [ + { + "bbox": [ + 105, + 669, + 506, + 703 + ], + "type": "text", + "content": "Shao, Z., Wang, P., Zhu, Q., Xu, R., Song, J., Zhang, M., Li, Y. K., Wu, Y., and Guo, D. (2024). Deepseekmath: Pushing the limits of mathematical reasoning in open language models. CoRR, abs/2402.03300." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 710, + 428, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 710, + 428, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 710, + 428, + 723 + ], + "type": "text", + "content": "Shazeer, N. (2020). GLU variants improve transformer. CoRR, abs/2002.05202." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 507, + 722 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "text", + "content": "Su, J., Ahmed, M. H. M., Lu, Y., Pan, S., Bo, W., and Liu, Y. (2024). Roformer: Enhanced transformer with rotary position embedding. Neurocomputing, 568:127063." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 100, + 505, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 100, + 505, + 157 + ], + "spans": [ + { + "bbox": [ + 106, + 100, + 505, + 157 + ], + "type": "text", + "content": "Talmor, A., Herzig, J., Lourie, N., and Berant, J. (2019). Commonsenseqa: A question answering challenge targeting commonsense knowledge. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2019, Minneapolis, MN, USA, June 2-7, 2019, Volume 1 (Long and Short Papers), pages 4149-4158." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 162, + 505, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 162, + 505, + 196 + ], + "spans": [ + { + "bbox": [ + 106, + 162, + 505, + 196 + ], + "type": "text", + "content": "Tang, Z., Zhang, X., Wang, B., and Wei, F. (2024). Mathscale: Scaling instruction tuning for mathematical reasoning. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 201, + 334, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 201, + 334, + 215 + ], + "spans": [ + { + "bbox": [ + 107, + 201, + 334, + 215 + ], + "type": "text", + "content": "Team, F.-L. (2024). The falcon 3 family of open models." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 220, + 507, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 220, + 507, + 245 + ], + "spans": [ + { + "bbox": [ + 107, + 220, + 507, + 245 + ], + "type": "text", + "content": "Team, G., Kamath, A., Ferret, J., Pathak, S., Vieillard, N., Merhej, R., Perrin, S., Matejovicova, T., Ram'e, A., Rivi'ere, M., et al. (2025). Gemma 3 technical report. arXiv preprint arXiv:2503.19786." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 249, + 505, + 305 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 249, + 505, + 305 + ], + "spans": [ + { + "bbox": [ + 106, + 249, + 505, + 305 + ], + "type": "text", + "content": "Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L., and Polosukhin, I. (2017). Attention is all you need. In Guyon, I., von Luxburg, U., Bengio, S., Wallach, H. M., Fergus, R., Vishwanathan, S. V. N., and Garnett, R., editors, Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 5998-6008." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 311, + 506, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 311, + 506, + 335 + ], + "spans": [ + { + "bbox": [ + 107, + 311, + 506, + 335 + ], + "type": "text", + "content": "Wang, H., Ma, S., Dong, L., Huang, S., Wang, H., Ma, L., Yang, F., Wang, R., Wu, Y., and Wei, F. (2023a). Bitnet: Scaling 1-bit transformers for large language models. CoRR, abs/2310.11453." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 340, + 506, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 340, + 506, + 373 + ], + "spans": [ + { + "bbox": [ + 107, + 340, + 506, + 373 + ], + "type": "text", + "content": "Wang, H., Ma, S., Huang, S., Dong, L., Wang, W., Peng, Z., Wu, Y., Bajaj, P., Singhal, S., Benhaim, A., Patra, B., Liu, Z., Chaudhary, V., Song, X., and Wei, F. (2022). Foundation transformers. CoRR." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 380, + 504, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 380, + 504, + 403 + ], + "spans": [ + { + "bbox": [ + 107, + 380, + 504, + 403 + ], + "type": "text", + "content": "Wang, H., Ma, S., Wang, R., and Wei, F. (2024a). Q-sparse: All large language models can be fully sparsely-activated. CoRR, abs/2407.10969." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 408, + 506, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 408, + 506, + 432 + ], + "spans": [ + { + "bbox": [ + 107, + 408, + 506, + 432 + ], + "type": "text", + "content": "Wang, H., Ma, S., and Wei, F. (2024b). Bitnet a4.8: 4-bit activations for 1-bit llms. CoRR, abs/2411.04965." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 437, + 506, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 437, + 506, + 461 + ], + "spans": [ + { + "bbox": [ + 107, + 437, + 506, + 461 + ], + "type": "text", + "content": "Wang, J., Zhou, H., Song, T., Cao, S., Xia, Y., Cao, T., Wei, J., Ma, S., Wang, H., and Wei, F. (2025). Bitnet.cpp: Efficient edge inference for ternary lms. CoRR, abs/2502.11880." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 467, + 506, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 467, + 506, + 491 + ], + "spans": [ + { + "bbox": [ + 107, + 467, + 506, + 491 + ], + "type": "text", + "content": "Wang, L., Ma, L., Cao, S., Zheng, N., Zhang, Q., Xue, J., Miao, Z., Cao, T., and Yang, Y. (2023b). Ladder: Efficient tensor compilation on customized data format. In OSDI." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 495, + 505, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 495, + 505, + 540 + ], + "spans": [ + { + "bbox": [ + 106, + 495, + 505, + 540 + ], + "type": "text", + "content": "Xu, C., Sun, Q., Zheng, K., Geng, X., Zhao, P., Feng, J., Tao, C., Lin, Q., and Jiang, D. (2024a). Wizardlm: Empowering large pre-trained language models to follow complex instructions. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 546, + 505, + 602 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 546, + 505, + 602 + ], + "spans": [ + { + "bbox": [ + 106, + 546, + 505, + 602 + ], + "type": "text", + "content": "Xu, Y., Han, X., Yang, Z., Wang, S., Zhu, Q., Liu, Z., Liu, W., and Che, W. (2024b). Onebit: Towards extremely low-bit large language models. In Globersons, A., Mackey, L., Belgrave, D., Fan, A., Paquet, U., Tomczak, J. M., and Zhang, C., editors, Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 608, + 506, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 608, + 506, + 632 + ], + "spans": [ + { + "bbox": [ + 107, + 608, + 506, + 632 + ], + "type": "text", + "content": "Xu, Z., Jiang, F., Niu, L., Deng, Y., Poovendran, R., Choi, Y., and Lin, B. Y. (2024c). Magpie: Alignment data synthesis from scratch by prompting aligned lms with nothing. CoRR, abs/2406.08464." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 637, + 504, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 637, + 504, + 661 + ], + "spans": [ + { + "bbox": [ + 107, + 637, + 504, + 661 + ], + "type": "text", + "content": "Yadav, V., Bethard, S., and Surdeanu, M. (2019). Quick and (not so) dirty: Unsupervised selection of justification sentences for multi-hop question answering. In EMNLP-IJCNLP." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 666, + 505, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 666, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 666, + 505, + 722 + ], + "type": "text", + "content": "Yang, A., Yang, B., Zhang, B., Hui, B., Zheng, B., Yu, B., Li, C., Liu, D., Huang, F., Wei, H., Lin, H., Yang, J., Tu, J., Zhang, J., Yang, J., Yang, J., Zhou, J., Lin, J., Dang, K., Lu, K., Bao, K., Yang, K., Yu, L., Li, M., Xue, M., Zhang, P., Zhu, Q., Men, R., Lin, R., Li, T., Xia, T., Ren, X., Ren, X., Fan, Y., Su, Y., Zhang, Y., Wan, Y., Liu, Y., Cui, Z., Zhang, Z., and Qiu, Z. (2024). Qwen2.5 technical report. CoRR, abs/2412.15115." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 506, + 301 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 105, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 505, + 106 + ], + "type": "text", + "content": "Zellers, R., Holtzman, A., Bisk, Y., Farhadi, A., and Choi, Y. (2019). HellaSwag: can a machine really finish your sentence? In Proceedings of the 57th Conference of the Association for Computational Linguistics, pages 4791-4800." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 114, + 505, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 114, + 505, + 137 + ], + "spans": [ + { + "bbox": [ + 105, + 114, + 505, + 137 + ], + "type": "text", + "content": "Zhang, Y., Zhang, Z., and Lew, L. (2022). PokeBNN: A binary pursuit of lightweight accuracy. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12465-12475. IEEE." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 144, + 506, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 144, + 506, + 178 + ], + "spans": [ + { + "bbox": [ + 105, + 144, + 506, + 178 + ], + "type": "text", + "content": "Zhao, W., Ren, X., Hessel, J., Cardie, C., Choi, Y., and Deng, Y. (2024). Wildchat: 1m chatgpt interaction logs in the wild. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 184, + 506, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 184, + 506, + 230 + ], + "spans": [ + { + "bbox": [ + 105, + 184, + 506, + 230 + ], + "type": "text", + "content": "Zheng, L., Chiang, W.-L., Sheng, Y., Li, T., Zhuang, S., Wu, Z., Zhuang, Y., Li, Z., Lin, Z., Xing, E. P., Gonzalez, J. E., Stoica, I., and Zhang, H. (2024). Lmsys-chat-1m: A large-scale real-world LLM conversation dataset. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 236, + 506, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 236, + 506, + 271 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 506, + 271 + ], + "type": "text", + "content": "Zheng, L., Chiang, W.-L., Sheng, Y., Zhuang, S., Wu, Z., Zhuang, Y., Lin, Z., Li, Z., Li, D., Xing, E. P., Zhang, H., Gonzalez, J. E., and Stoica, I. (2023). Judging lvm-as-a-judge with mt-bench and chatbot arena. In Advances in Neural Information Processing Systems 36." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 277, + 506, + 301 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 277, + 506, + 301 + ], + "spans": [ + { + "bbox": [ + 105, + 277, + 506, + 301 + ], + "type": "text", + "content": "Zhou, J., Lu, T., Mishra, S., Brahma, S., Basu, S., Luan, Y., Zhou, D., and Hou, L. (2023). Instruction-following evaluation for large language models. CoRR, abs/2311.07911." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 105, + 321, + 246, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 321, + 246, + 335 + ], + "spans": [ + { + "bbox": [ + 105, + 321, + 246, + 335 + ], + "type": "text", + "content": "A Open-weight Baselines" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 346, + 440, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 346, + 440, + 358 + ], + "spans": [ + { + "bbox": [ + 105, + 346, + 440, + 358 + ], + "type": "text", + "content": "We summarize the links to the open-weight LLMs evaluated in this work as below:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 368, + 447, + 560 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 132, + 368, + 380, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 368, + 380, + 379 + ], + "spans": [ + { + "bbox": [ + 132, + 368, + 380, + 379 + ], + "type": "text", + "content": "- LLaMA 3.2 1B: meta-llama/Llama-3.2-1B-Instruct" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 383, + 310, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 383, + 310, + 395 + ], + "spans": [ + { + "bbox": [ + 132, + 383, + 310, + 395 + ], + "type": "text", + "content": "- Gemma-3 1B: google/gemma-3-1b-it" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 398, + 344, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 398, + 344, + 410 + ], + "spans": [ + { + "bbox": [ + 132, + 398, + 344, + 410 + ], + "type": "text", + "content": "Qwen2.5 0.5B: Qwen/Qwen2.5-0.5B-Instruct" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 413, + 344, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 413, + 344, + 425 + ], + "spans": [ + { + "bbox": [ + 132, + 413, + 344, + 425 + ], + "type": "text", + "content": "- Qwen2.5 1.5B: Qwen/Qwen2.5-1.5B-Instruct" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 428, + 326, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 428, + 326, + 440 + ], + "spans": [ + { + "bbox": [ + 132, + 428, + 326, + 440 + ], + "type": "text", + "content": "- Qwen2.5 3B: Qwen/Qwen2.5-3B-Instruct" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 132, + 443, + 395, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 443, + 395, + 455 + ], + "spans": [ + { + "bbox": [ + 132, 
+ 443, + 395, + 455 + ], + "type": "text", + "content": "- SmolLM2 1.7B: HuggingFaceTB/SmolLM2-1.7B-Instruct" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 458, + 348, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 458, + 348, + 470 + ], + "spans": [ + { + "bbox": [ + 132, + 458, + 348, + 470 + ], + "type": "text", + "content": "- MiniCPM 2B: openbmb/MiniCPM-2B-dpo-bf16" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 132, + 473, + 447, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 473, + 447, + 485 + ], + "spans": [ + { + "bbox": [ + 132, + 473, + 447, + 485 + ], + "type": "text", + "content": "- Qwen2.5 1.5B-GPTQ-int4: Qwen/Qwen2.5-1.5B-Instruct-GPTQ-Int4" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 132, + 489, + 411, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 489, + 411, + 501 + ], + "spans": [ + { + "bbox": [ + 132, + 489, + 411, + 501 + ], + "type": "text", + "content": "Qwen2.5 1.5B-AWQ-int4: Qwen/Qwen2.5-1.5B-Instruct-AWQ" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 132, + 504, + 283, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 504, + 283, + 516 + ], + "spans": [ + { + "bbox": [ + 132, + 504, + 283, + 516 + ], + "type": "text", + "content": "- Bonsai 0.5B: deepgrove/Bonsai" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 132, + 519, + 362, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 519, + 362, + 530 + ], + "spans": [ + { + "bbox": [ + 132, + 519, + 362, + 530 + ], + "type": "text", + "content": "- OLMo-Bitnet 1B: NousResearch/OLMo-Bitnet-1B" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 132, + 534, + 406, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 534, + 406, + 545 + ], + "spans": [ + { + "bbox": [ + 132, + 534, + 406, + 545 + ], + "type": "text", + "content": "- Falcon3-1.58bit 7B: tiiuae/Falcon3-7B-Instruct-1.58bit" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 132, + 549, + 419, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 549, + 419, + 560 + ], + "spans": [ + { + "bbox": [ + 132, + 549, + 419, + 560 + ], + "type": "text", + "content": "- Llama3-8B-1.58 8B: HF1BitLLM/Llama3-8B-1.58-100B-tokens" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 576, + 266, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 576, + 266, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 576, + 266, + 590 + ], + "type": "text", + "content": "B Evaluation Pipeline Details" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 601, + 506, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 601, + 506, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 601, + 506, + 624 + ], + "type": "text", + "content": "To ensure standardized evaluation, we employed established toolkits for different benchmark categories. Specifically:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 132, + 632, + 505, + 722 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 132, + 632, + 443, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 632, + 443, + 644 + ], + "spans": [ + { + "bbox": [ + 132, + 632, + 443, + 644 + ], + "type": "text", + "content": "- For the HumanEval+ coding benchmark, we utilized the evalplus toolkit." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 132, + 647, + 505, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 647, + 505, + 669 + ], + "spans": [ + { + "bbox": [ + 132, + 647, + 505, + 669 + ], + "type": "text", + "content": "- For the MATH-500 mathematical reasoning benchmark, we used a customized version of the math-evaluation-harness toolkit." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 132, + 673, + 504, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 673, + 504, + 696 + ], + "spans": [ + { + "bbox": [ + 132, + 673, + 504, + 696 + ], + "type": "text", + "content": "- For the MT-Bench conversational benchmark, evaluation was performed using the official LLM Judge open-source codebase." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 132, + 700, + 504, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 700, + 504, + 722 + ], + "spans": [ + { + "bbox": [ + 132, + 700, + 504, + 722 + ], + "type": "text", + "content": "- For all other benchmarks assessing language understanding, reasoning, knowledge, and comprehension, we used the standard lm-evaluation-harness framework." + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 222, + 70, + 388, + 117 + ], + "blocks": [ + { + "bbox": [ + 222, + 70, + 388, + 117 + ], + "lines": [ + { + "bbox": [ + 222, + 70, + 388, + 117 + ], + "spans": [ + { + "bbox": [ + 222, + 70, + 388, + 117 + ], + "type": "table", + "html": "
<table><tr><td>Bits</td><td>ADD Energy</td><td>MUL Energy</td></tr>
<tr><td>FP16</td><td>0.16</td><td>0.34</td></tr>
<tr><td>INT8</td><td>0.007</td><td>0.07</td></tr></table>
", + "image_path": "c2ea347c586a5437a02e09c5396b1bc21f19fa3a3f5ae4fc75ee151f66b801d8.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 111, + 121, + 499, + 133 + ], + "lines": [ + { + "bbox": [ + 111, + 121, + 499, + 133 + ], + "spans": [ + { + "bbox": [ + 111, + 121, + 499, + 133 + ], + "type": "text", + "content": "Table 4: ADD and MUL energy consumption (in pJ) of different precision at " + }, + { + "bbox": [ + 111, + 121, + 499, + 133 + ], + "type": "inline_equation", + "content": "7\\mathrm{nm}" + }, + { + "bbox": [ + 111, + 121, + 499, + 133 + ], + "type": "text", + "content": " process nodes." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 107, + 153, + 504, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 153, + 504, + 175 + ], + "spans": [ + { + "bbox": [ + 107, + 153, + 504, + 175 + ], + "type": "text", + "content": "Models were prompted using a chat format for generative tasks (e.g., GSM8K, IFEval, and MT-Bench), while default settings from the respective toolkits were used for other tasks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 180, + 504, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 180, + 504, + 224 + ], + "spans": [ + { + "bbox": [ + 107, + 180, + 504, + 224 + ], + "type": "text", + "content": "For energy consumption, we utilize the energy model in (Horowitz, 2014; Zhang et al., 2022) to estimate the arithmetic operations energy (AOE) of matrix multiplication. The sequence length is set as 512 tokens. We present the energy consumption for ADD and MUL operation at " + }, + { + "bbox": [ + 107, + 180, + 504, + 224 + ], + "type": "inline_equation", + "content": "7\\mathrm{nm}" + }, + { + "bbox": [ + 107, + 180, + 504, + 224 + ], + "type": "text", + "content": " process nodes in Table 4." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 229, + 504, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 229, + 504, + 285 + ], + "spans": [ + { + "bbox": [ + 107, + 229, + 504, + 285 + ], + "type": "text", + "content": "To assess CPU decoding performance, latency measurements were conducted on a Surface Laptop Studio 2 system powered by a 13th Gen Intel Core i7-13800H processor. The benchmarking process utilized 8 CPU threads. Specifically, the BitNet b1.58 2B4T model was tested using its bitnet.cpp implementation, whereas other models were evaluated using the llama.cpp framework. For each model, we generated 128 tokens and report the average latency per token for this task." 
+ } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12369/3a2c027d-7926-4954-8a0d-d286f9d6a3ea_content_list.json b/data/2025/2504_12xxx/2504.12369/3a2c027d-7926-4954-8a0d-d286f9d6a3ea_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..d4748c4670698594db51e1255c94b51441742d35 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/3a2c027d-7926-4954-8a0d-d286f9d6a3ea_content_list.json @@ -0,0 +1,2789 @@ +[ + { + "type": "text", + "text": "WORLDMEM: Long-term Consistent World Simulation with Memory", + "text_level": 1, + "bbox": [ + 272, + 122, + 725, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zeqi Xiao $^{1}$ Yushi Lan $^{1}$ Yifan Zhou $^{1}$ Wenqi Ouyang $^{1}$ Shuai Yang $^{2}$ Yanhong Zeng $^{3}$ Xingang Pan $^{1}$", + "bbox": [ + 294, + 224, + 699, + 255 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ S-Lab, Nanyang Technological University,", + "bbox": [ + 354, + 257, + 643, + 273 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ Wangxuan Institute of Computer Technology, Peking University", + "bbox": [ + 282, + 273, + 712, + 287 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3Shanghai AI Laboratory", + "bbox": [ + 413, + 287, + 583, + 301 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{zeqi001, yushi001, yifan006, wenqi.ouyang, xingang.pan}@ntu.edu.sg", + "bbox": [ + 240, + 301, + 756, + 316 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "williamyang@pku.edu.cn, zengyh1900@gmail.com", + "bbox": [ + 328, + 316, + 669, + 329 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 364, + 537, + 380 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "World simulation has gained increasing popularity due to its ability to model virtual environments and predict the consequences of actions. However, the limited temporal context window often leads to failures in maintaining long-term consistency, particularly in preserving 3D spatial consistency. In this work, we present WOrLD-MEM, a framework that enhances scene generation with a memory bank consisting of memory units that store memory frames and states (e.g., poses and timestamps). By employing state-aware memory attention that effectively extracts relevant information from these memory frames based on their states, our method is capable of accurately reconstructing previously observed scenes, even under significant viewpoint or temporal gaps. Furthermore, by incorporating timestamps into the states, our framework not only models a static world but also captures its dynamic evolution over time, enabling both perception and interaction within the simulated world. Extensive experiments in both virtual and real scenarios validate the effectiveness of our approach. 
Project page at https://xizaoqu.github.io/worldmem.", + "bbox": [ + 228, + 396, + 767, + 590 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 621, + 313, + 636 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "World simulation has gained significant attention for its ability to model environments and predict the outcomes of actions (Bar et al., 2024; Decart et al., 2024; Alonso et al., 2025; Feng et al., 2024; Parker-Holder et al., 2024; Valevski et al., 2024). Recent advances in video diffusion models have further propelled this field, enabling high-fidelity rollouts of potential future scenarios based on user actions, such as navigating through an environment or interacting with objects. These capabilities make world simulators particularly promising for applications in autonomous navigation (Feng et al., 2024; Bar et al., 2024) and as viable alternatives to traditional game engines (Decart et al., 2024; Parker-Holder et al., 2024).", + "bbox": [ + 169, + 652, + 826, + 763 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Despite these advances, a fundamental challenge remains: the limited probing horizon. Due to computational and memory constraints, video generative models operate within a fixed context window and are unable to condition on the full sequence of past generations. Consequently, most existing methods simply discard previously generated content, leading to a critical issue of world inconsistency, which is also revealed in Wang et al. (2025). As illustrated in Figure 1(a), when the camera moves away and returns, the regenerated content diverges from the earlier scene, violating the coherence expected in a consistent world.", + "bbox": [ + 169, + 768, + 823, + 867 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "A natural solution is to maintain an external memory that stores and retrieves relevant historical information outside the generative loop. While intuitive, formulating such a memory mechanism is", + "bbox": [ + 169, + 873, + 823, + 902 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.12369v2 [cs.CV] 2 Dec 2025", + "bbox": [ + 22, + 281, + 57, + 715 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "39th Conference on Neural Information Processing Systems (NeurIPS 2025).", + "bbox": [ + 171, + 922, + 629, + 936 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/2cedaf771a3bc9c255e1950c8a7a8826919dba3fb6d4f8b211d37dc47c3d69f4.jpg", + "image_caption": [ + "Figure 1: WORLDMEM enables long-term consistent world generation with an integrated memory mechanism. (a) Previous world generation methods typically face the problem of inconsistent world due to limited temporal context window size. (b) WORLDMEM empowers the agent to explore diverse and consistent worlds with an expansive action space, e.g., crafting environments by placing objects like pumpkin light or freely roaming around. Most importantly, after exploring for a while and glancing back, we find the objects we placed are still there, with the inspiring sight of the light melting the surrounding snow, testifying to the passage of time. Red and green boxes indicate scenes that should be consistent." + ], + "image_footnote": [], + "bbox": [ + 174, + 89, + 826, + 354 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "non-trivial. A direct approach might involve explicit 3D scene reconstruction to preserve geometry and detail. 
However, 3D representations are inflexible in dynamic and evolving environments and are prone to loss of detail, especially for large, unbounded scenes (Wu et al., 2025a).", + "bbox": [ + 169, + 491, + 823, + 532 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Instead, we argue that geometry-free representations offer a more flexible solution. These representations, however, pose their own challenges – particularly in balancing detail retention with memory scalability. For example, implicit approaches like storing abstract features via LoRA modules (Hong et al., 2024) offer compactness but lose visual fidelity and spatial specificity. Some recent works represent visual scenes as discrete tokens encoding fine-grained visual information (Sajjadi et al., 2022; Jiang et al., 2025), but they are limited by a fixed token and struggle to capture the complexity of diverse and evolving environments. To address this issue, we observe that for generating the immediate future, only a small subset of historical content is typically relevant. Based on this, we propose a token-level memory bank that stores all previously generated latent tokens, and retrieves a targeted subset for each generation step based on relevance.", + "bbox": [ + 169, + 539, + 826, + 678 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Conditioning on the retrieved memory requires spatial-temporal reasoning. In contrast to prior work where memory aids local temporal smoothness (Zheng et al., 2024a) or semantic coherence (Wu et al., 2025b; Rahman et al., 2023), long-term world simulation demands reasoning over large spatiotemporal gaps, e.g., memory and query may differ in viewpoint and time, and retain exact scenes with detail. To facilitate this reasoning, we propose augmenting each memory unit with explicit state cues, including spatial location, viewpoint, and timestamp. These cues serve as anchors for reasoning and are embedded as part of the query-key attention mechanism. Through this state-aware attention, our model can effectively reason the current frame with past observations, facilitating accurate and coherent generation. Importantly, such a design leverages standard attention architectures, enabling it to scale naturally with modern hardware and model capacity.", + "bbox": [ + 169, + 683, + 826, + 823 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Motivated by this idea, we build our approach, WOrLDMEM, on top of the Conditional Diffusion Transformer (CDiT) (Peebles and Xie, 2023) and the Diffusion Forcing (DF) paradigm (Chen et al., 2025), which autoregressively generates first-person viewpoints conditioned on external action signals. As discussed above, at the core of WOrLDMEM is a memory mechanism composed of a memory bank and memory attention. To ensure efficient and relevant memory retrieval from the bank, we introduce a confidence-based selection strategy that scores memory units based on field-of-view", + "bbox": [ + 169, + 828, + 826, + 912 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "(FOV) overlap and temporal proximity. In the memory attention, the latent tokens being generated act as queries, attending to the memory tokens (as keys and values) to incorporate relevant historical context. To ensure robust correspondence across varying viewpoints and time gaps, we enrich both queries and keys with state-aware embeddings. 
A relative embedding design is introduced to ease the learning of spatial and temporal relationships. This pipeline enables precise, scalable reasoning over long-range memory, ensuring consistency in dynamic and evolving world simulations.", + "bbox": [ + 169, + 90, + 823, + 175 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We evaluate WOrLDMEM on a customized Minecraft benchmark (Fan et al., 2022) and on RealEstate10K (Zhou et al., 2018). The Minecraft benchmark includes diverse terrains (e.g., plains, savannas, and deserts) and various action modalities (movement, viewpoint control, and event triggers), which is a wonderful environment for idea verification. Extensive experiments show that WOrLDMEM significantly improves 3D spatial consistency, enabling robust viewpoint reasoning and high-fidelity scene generation, as shown in Figure 1(b). Furthermore, in dynamic environments, WOrLDMEM accurately tracks and follows evolving events and environment changes, demonstrating its ability to both perceive and interact with the generated world. We hope our promising results and scalable designs will inspire future research on memory-based world simulation.", + "bbox": [ + 169, + 181, + 826, + 305 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 171, + 339, + 323, + 356 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Video diffusion model. With the rapid advancement of diffusion models (Song et al., 2020; Peebles and Xie, 2023; Chen et al., 2025), video generation has made significant strides (Wang et al., 2023a,b; Chen et al., 2023; Guo et al., 2023; OpenAI, 2024; Jin et al., 2024; Yin et al., 2024). The field has evolved from traditional U-Net-based architectures (Wang et al., 2023a; Chen et al., 2023; Guo et al., 2023) to Transformer-based frameworks (OpenAI, 2024; Ma et al., 2024; Zheng et al., 2024b), enabling video diffusion models to generate highly realistic and temporally coherent videos. Recently, autoregressive video generation (Chen et al., 2025; Kim et al., 2024; Henschel et al., 2024) has emerged as a promising approach to extend video length, theoretically indefinitely. Notably, Diffusion Forcing (Chen et al., 2025) introduces a per-frame noise-level denoising paradigm. Unlike the full-sequence paradigm, which applies a uniform noise level across all frames, per-frame noise-level denoising offers a more flexible approach, enabling autoregressive generation.", + "bbox": [ + 169, + 378, + 826, + 532 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Interactive world simulation. World simulation aims to model an environment by predicting the next state given the current state and action. This concept has been extensively explored in the construction of world models (Ha and Schmidhuber, 2018b) for agent learning (Ha and Schmidhuber, 2018a; Hafner et al., 2019, 2020; Hu et al., 2023; Beattie et al., 2016; Yang et al., 2023). With advances in video generation, high-quality world simulation with robust control has become feasible, leading to numerous works focusing on interactive world simulation (Bar et al., 2024; Decart et al., 2024; Alonso et al., 2025; Feng et al., 2024; Parker-Holder et al., 2024; Valevski et al., 2024; Yu et al., 2025c,a,b). 
These approaches enable agents to navigate generated environments and interact with them based on external commands.", + "bbox": [ + 169, + 539, + 826, + 662 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "However, due to context window limitations, such methods discard previously generated content, leading to inconsistencies in the simulated world, particularly in maintaining 3D spatial coherence.", + "bbox": [ + 169, + 670, + 826, + 698 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Consistent world simulation. Ensuring the consistency of a generated world is crucial for effective world simulation Wang et al. (2025). Existing approaches can be broadly categorized into two types: geometric-based and geometric-free. The geometric-based methods explicitly reconstruct the generated world into a 3D/4D representation (Liu et al., 2024; Gao et al., 2024; Wang and Agapito, 2024; Ren et al., 2025; Yu et al., 2024b,a; Liang et al., 2024). While this strategy can reliably maintain consistency, it imposes strict constraints on flexibility: Once the world is reconstructed, modifying or interacting with it becomes challenging. Geometric-free methods focus on implicit learning. Methods like Alonso et al. (2025); Valevski et al. (2024) ensure consistency by overfitting to predefined scenarios (e.g., specific CS:GO or DOOM maps), limiting scalability. StreamingT2V (Henschel et al., 2024) maintains long-term consistency by continuing on both global and local visual contexts from previous frames, while SlowFastGen (Hong et al., 2024) progressively trains LoRA (Hu et al., 2022) modules for memory recall. However, these methods rely on abstract representations, making accurate scene reconstruction challenging. In contrast, our approach retrieves information from previously generated frames and their states, ensuring world consistency without overfitting to specific scenarios.", + "bbox": [ + 169, + 704, + 826, + 911 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/17f283519eda9a5331b73da78c30e9f49bf3b0344d40c5194698866ef6a8043e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 187, + 97, + 619, + 194 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/710060b8d65f17b785353128df68a37c04d5ccfe3c20236be522f6805024dbe3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 109, + 769, + 169 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/a94869f851f3a9f0a5887da9940203db5f53e43246aa8bb56a01ceb394a21328.jpg", + "image_caption": [ + "(c) State Embedding" + ], + "image_footnote": [], + "bbox": [ + 189, + 205, + 369, + 318 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/0eaba4c9b0918d5cb17309e5aac57ca03240e9a5a335a15ff79f2279e7e8be2c.jpg", + "image_caption": [ + "(b) Input Difference", + "(d) Memory Block", + "Figure 2: Comprehensive overview of WOrLDMEM. The framework comprises a conditional diffusion transformer integrated with memory blocks, with a dedicated memory bank storing memory units from previously generated content. By retrieving these memory units from the memory bank and incorporating the information by memory blocks to guide generation, our approach ensures long-term consistency in world simulation." 
+ ], + "image_footnote": [], + "bbox": [ + 421, + 203, + 805, + 325 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 WORLDMEM", + "text_level": 1, + "bbox": [ + 171, + 433, + 320, + 448 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This section details the methodology of WOrLDMEM. Sec. 3.1 introduces the relevant preliminaries, while Sec. 3.2 describes the interactive world simulator serving as our baseline. Sec. 3.3 and 3.4 present the core of our proposed memory mechanism.", + "bbox": [ + 169, + 463, + 823, + 506 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Preliminary", + "text_level": 1, + "bbox": [ + 171, + 521, + 295, + 536 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Video diffusion models. Video diffusion models generate video sequences by iteratively denoising Gaussian noise through a learned reverse process:", + "bbox": [ + 169, + 546, + 823, + 575 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\np _ {\\theta} \\left(\\mathbf {x} _ {t} ^ {k - 1} \\mid \\mathbf {x} _ {t} ^ {k}\\right) = \\mathcal {N} \\left(\\mathbf {x} _ {t} ^ {k - 1}; \\mu_ {\\theta} \\left(\\mathbf {x} _ {t} ^ {k}, k\\right), \\sigma_ {k} ^ {2} \\mathbf {I}\\right), \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 354, + 578, + 823, + 597 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where all frames $(\\mathbf{x}_t^k)_{1\\leq t\\leq T}$ share the same noise level $k$ and $T$ is the context window length. This full-sequence approach enables global guidance but lacks flexibility in sequence length and autoregressive generation.", + "bbox": [ + 169, + 599, + 823, + 643 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Autoregressive video generation. Autoregressive video generation aims to extend videos over the long term by predicting frames sequentially (Kondratyuk et al., 2024; Wu et al., 2023). While various methods exist for autoregressive generation, Diffusion Forcing (DF) (Chen et al., 2025) provides a neat and effective approach to achieve this. Specifically, DF introduces per-frame noise levels $k_{t}$ :", + "bbox": [ + 169, + 650, + 823, + 705 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\np _ {\\theta} \\left(\\mathbf {x} _ {t} ^ {k _ {t} - 1} \\mid \\mathbf {x} _ {t} ^ {k _ {t}}\\right) = \\mathcal {N} \\left(\\mathbf {x} _ {t} ^ {k _ {t} - 1}; \\mu_ {\\theta} \\left(\\mathbf {x} _ {t} ^ {k _ {t}}, k _ {t}\\right), \\sigma_ {k _ {t}} ^ {2} \\mathbf {I}\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 339, + 709, + 823, + 729 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Unlike full-sequence diffusion, DF generates video flexibly and stably beyond the training horizon. Autoregressive generation is a special case when only the last one or a few frames are noisy. With autoregressive video generation, long-term interactive world simulation becomes feasible.", + "bbox": [ + 169, + 738, + 823, + 780 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 Interactive World Simulation", + "text_level": 1, + "bbox": [ + 171, + 795, + 416, + 809 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Before introducing the memory mechanism, we first present our interactive world simulator, which models long video sequences using an auto-regressive conditional diffusion transformer. 
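To make the per-frame noise levels of Eq. (2) concrete, a minimal sketch of the two sampling regimes (our illustration in PyTorch, not the released code; the batch size, window length, and 1000-step schedule are assumptions):

```python
import torch

def sample_noise_levels(batch: int, frames: int, k_max: int = 1000,
                        per_frame: bool = True) -> torch.Tensor:
    """Draw denoising levels for a batch of video sequences.

    Full-sequence diffusion (Eq. 1) shares a single level k across all
    T frames; Diffusion Forcing (Eq. 2) draws an independent k_t per
    frame, which is what makes flexible autoregressive rollout possible.
    """
    if per_frame:
        return torch.randint(0, k_max, (batch, frames))
    return torch.randint(0, k_max, (batch, 1)).expand(batch, frames)

k_full = sample_noise_levels(2, 8, per_frame=False)  # one shared k per sequence
k_df = sample_noise_levels(2, 8, per_frame=True)     # independent k_t per frame
assert bool((k_full == k_full[:, :1]).all())
# Autoregressive generation is the special case where history frames sit at
# the minimum noise level and only the frame being generated carries high noise.
```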
Interaction is achieved by embedding external control signals, primarily actions, into the model through dedicated conditioning modules (Parker-Holder et al., 2024; Decart et al., 2024; Yu et al., 2025c).",
+ "bbox": [
+ 169,
+ 821,
+ 823,
+ 877
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "text",
+ "text": "Following prior work (Decart et al., 2024), we adopt a conditional Diffusion Transformer (DiT) (Peebles and Xie, 2023) architecture for video generation, and Diffusion Forcing (DF) (Chen et al.,",
+ "bbox": [
+ 169,
+ 883,
+ 826,
+ 912
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "page_number",
+ "text": "4",
+ "bbox": [
+ 493,
+ 935,
+ 503,
+ 946
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "text",
+ "text": "2025) for autoregressive prediction. As shown in Figure 2(a), our model consists of multiple DiT blocks with spatial and temporal modules for spatiotemporal reasoning. The temporal module applies causal attention to ensure that each frame only attends to preceding frames.",
+ "bbox": [
+ 169,
+ 90,
+ 823,
+ 133
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "The actions are injected by first projecting them into the embedding space using a multi-layer perceptron (MLP). The resulting action embeddings are added to the denoising timestep embeddings and injected into the temporal blocks using Adaptive Layer Normalization (AdaLN) (Xu et al., 2019), following the paradigm of Bar et al. (2024); Decart et al. (2024). In our Minecraft experiments, the action space contains 25 dimensions, including movements, view adjustments, and event triggers. We also apply timestep embeddings to the spatial blocks in the same manner, although this is omitted from the figure for clarity. Standard architectural components such as residual connections, multi-head attention, and feedforward networks are also not shown.",
+ "bbox": [
+ 169,
+ 138,
+ 826,
+ 251
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "The combination of conditional DiT and DF provides a strong baseline for long-term interactive video generation. However, due to the computational cost of video synthesis, the temporal context window remains limited. As a result, content outside this window is forgotten, which leads to inconsistencies during long-term generation (Decart et al., 2024).",
+ "bbox": [
+ 169,
+ 256,
+ 823,
+ 313
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "3.3 Memory Representation and Retrieval",
+ "text_level": 1,
+ "bbox": [
+ 171,
+ 330,
+ 480,
+ 345
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "To address the limited context window of video generative models, we introduce a memory mechanism that enables the model to retain and retrieve information beyond the current generation window. This mechanism maintains a memory bank composed of historical frames and their associated state information: $\{(\mathbf{x}_i^m,\mathbf{p}_i,t_i)\}_{i = 1}^N$ where $\mathbf{x}_i^m$ denotes a memory frame, $\mathbf{p}_i\in \mathbb{R}^5$ (x, y, z, pitch, yaw) is its pose, and $t_i$ is the timestamp. Each tuple is referred to as a memory unit. We store each $\mathbf{x}_i^m$ at the token level: it is compressed by the visual encoder but retains enough details for reconstruction.
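As a concrete picture of this structure, a minimal sketch of a memory unit and bank (the class names, tensor shapes, and the 16×9 token grid are our assumptions for illustration):

```python
from dataclasses import dataclass, field

import torch

@dataclass
class MemoryUnit:
    """One memory bank entry: token-level latents plus their state cues."""
    tokens: torch.Tensor   # (h*w, d) latent tokens of the frame
    pose: torch.Tensor     # (5,) = (x, y, z, pitch, yaw)
    timestamp: float

@dataclass
class MemoryBank:
    units: list[MemoryUnit] = field(default_factory=list)

    def add(self, tokens: torch.Tensor, pose: torch.Tensor, t: float) -> None:
        # Every generated frame is appended; retrieval (Algorithm 1)
        # later selects only a relevant subset for conditioning.
        self.units.append(MemoryUnit(tokens.detach(), pose.clone(), t))

bank = MemoryBank()
bank.add(torch.randn(16 * 9, 1024), torch.tensor([0.0, 64.0, 0.0, 0.0, 90.0]), t=0.0)
```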
The corresponding states $\\{(\\mathbf{p},t)\\}$ play a critical role not only in memory retrieval but also in enabling state-aware memory conditioning.", + "bbox": [ + 169, + 356, + 454, + 604 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Algorithm 1: Memory Retrieval Algorithm", + "text_level": 1, + "bbox": [ + 462, + 359, + 751, + 376 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Input: Memory bank of $N$ historical states $\\{(\\mathbf{x}_i^m,\\mathbf{p}_i,t_i)\\}_{i = 1}^N;$", + "bbox": [ + 462, + 378, + 728, + 405 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Current state $(\\mathbf{x}_c,\\mathbf{p}_c,t_c)$ ; memory condition length $L_{M}$", + "bbox": [ + 462, + 405, + 802, + 417 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Similarity threshold $tr$ ; weights $w_{o}$ , $w_{t}$ .", + "bbox": [ + 462, + 417, + 702, + 429 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Output: A list of selected state indices $S$", + "bbox": [ + 462, + 429, + 710, + 441 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Compute Confidence Score:", + "bbox": [ + 462, + 441, + 642, + 454 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Compute FOV overlap ratio o via Monte Carlo sampling.", + "bbox": [ + 462, + 454, + 803, + 467 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Compute time difference $\\mathbf{d} = \\mathrm{Concat}\\big(\\{|t_i - t_c|\\}_{i = 1}^n\\big)$", + "bbox": [ + 462, + 467, + 787, + 479 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Compute confidence $\\alpha = \\mathbf{o}\\cdot w_{o} - \\mathbf{d}\\cdot w_{t}$", + "bbox": [ + 462, + 479, + 718, + 492 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Selection with Similarity Filtering:", + "bbox": [ + 462, + 497, + 684, + 508 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Initialize $S = \\varnothing$", + "bbox": [ + 462, + 510, + 565, + 521 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "for $m = 1$ to $L_{M}$ do", + "bbox": [ + 462, + 522, + 593, + 535 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Select $i^{*}$ with highest $\\alpha_{i^{*}}$", + "bbox": [ + 480, + 535, + 637, + 547 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Append $i^{*}$ to $S$", + "bbox": [ + 480, + 547, + 576, + 559 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Remove all $j$ where similarity $(i^{*},j) > tr$", + "bbox": [ + 480, + 559, + 732, + 573 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "return $S$", + "bbox": [ + 465, + 575, + 521, + 585 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Memory Retrieval. Since the number of memory frames available for conditioning is limited, an efficient strategy is required to sample memory units from the memory bank. We adopt a greedy matching algorithm based on frame-pair similarity, where similarity is defined using the field-of-view (FOV) overlap ratio and timestamp differences as confidence measures. Algorithm 1 presents our approach to memory retrieval. Although simple, this strategy proves effective in retrieving relevant information for conditioning. 
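Algorithm 1 reduces to a short greedy loop. A sketch reusing the MemoryBank above, with an injected overlap(p, q) -> [0, 1] estimator standing in for the Monte Carlo FOV-overlap computation:

```python
def retrieve(bank, pose_c, t_c, overlap, L_M=8, tr=0.9, w_o=1.0, w_t=0.2):
    """Greedy confidence-based memory retrieval (Algorithm 1).

    `overlap(p, q) -> [0, 1]` scores field-of-view overlap between two
    poses; the paper estimates it by Monte Carlo sampling, here it is
    injected so the selection logic stays self-contained.
    """
    # Confidence: reward FOV overlap with the current pose, penalise time gap.
    conf = {i: w_o * overlap(u.pose, pose_c) - w_t * abs(u.timestamp - t_c)
            for i, u in enumerate(bank.units)}
    selected, candidates = [], set(conf)
    while len(selected) < L_M and candidates:
        best = max(candidates, key=conf.__getitem__)
        selected.append(best)
        candidates.discard(best)
        # Similarity filtering: drop near-duplicates of the chosen unit so
        # the L_M conditioning slots stay diverse.
        candidates -= {j for j in candidates
                       if overlap(bank.units[best].pose, bank.units[j].pose) > tr}
    return selected  # indices into bank.units
```

The experiments in Sec. 4 report $tr = 0.9$, $w_o = 1$, and $w_t = 0.2 / t_c$ for these weights.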
Moreover, the model's reasoning over memory helps maintain performance even when the retrieved content is imperfect.", + "bbox": [ + 169, + 611, + 826, + 709 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4 State-aware Memory Condition", + "text_level": 1, + "bbox": [ + 171, + 726, + 433, + 741 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "After retrieving necessary memory units, unlike prior methods that use memory mainly for temporal smoothness (Zheng et al., 2024a) or semantic guidance (Wu et al., 2025b; Rahman et al., 2023), our goal is to explicitly reconstruct previously seen visual content – even under significant viewpoint or scene changes. This requires the model to perform spatiotemporal reasoning to extract relevant information from memory, which we model using cross-attention (Vaswani et al., 2017). Since relying solely on visual tokens can be ambiguous, we incorporate the corresponding states as cues to enable state-aware attention.", + "bbox": [ + 169, + 752, + 826, + 849 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "State Embedding. State embedding provides essential spatial and temporal context for memory retrieval. To encode spatial information, we adopt Plücker embedding (Sitzmann et al., 2021) to convert 5D poses $\\mathbf{p} \\in \\mathbb{R}^5$ into dense positional features $\\mathrm{PE}(\\mathbf{p}) \\in \\mathbb{R}^{h \\times w \\times 6}$ , following (He et al., 2024; Gao et al., 2024). Temporal context is captured via a lightweight MLP over sinusoidal embedded", + "bbox": [ + 169, + 854, + 826, + 912 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/767f4bcd7f8825e3ca7df0605b4a362e6098d0785328a13ad2ac10801d30be44.jpg", + "image_caption": [ + "Figure 3: Qualitative results. We showcase WORLDMEM's capabilities through two sets of examples. Top: A comparison with Ground Truth (GT). WORLDMEM accurately models diverse dynamics (e.g., rain) by conditioning on 600 past frames, ensuring temporal consistency. Bottom: Interaction with the world. Objects like hay in the desert or wheat in the plains persist over time, with wheat visibly growing. For the best experience, see the supplementary videos." + ], + "image_footnote": [], + "bbox": [ + 192, + 95, + 803, + 508 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "$(SE)$ timestamps. The final embedding is (Figure 2 (c)):", + "bbox": [ + 171, + 598, + 539, + 613 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {E} = G _ {p} (\\mathrm {P E} (\\mathbf {p})) + G _ {t} (\\mathrm {S E} (t)), \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 393, + 618, + 823, + 635 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $G_{p}$ and $G_{t}$ are MLPs mapping pose and time into a shared space.", + "bbox": [ + 169, + 640, + 643, + 655 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "State-aware Memory Attention. To support reconstruction under viewpoint and temporal shifts, we introduce a state-aware attention mechanism that incorporates spatial-temporal cues into memory retrieval. 
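A sketch of how Eq. (3) might be realized as a module, which the attention mechanism described next adds to queries and keys (the layer widths and sinusoidal frequency count are our assumptions; the per-token Plücker map PE(p) is assumed precomputed):

```python
import math

import torch
import torch.nn as nn

class StateEmbedding(nn.Module):
    """E = G_p(PE(p)) + G_t(SE(t))  (Eq. 3), evaluated per latent token."""

    def __init__(self, d_model: int = 1024, t_freqs: int = 64):
        super().__init__()
        self.g_p = nn.Sequential(nn.Linear(6, d_model), nn.SiLU(),
                                 nn.Linear(d_model, d_model))
        self.g_t = nn.Sequential(nn.Linear(2 * t_freqs, d_model), nn.SiLU(),
                                 nn.Linear(d_model, d_model))
        self.t_freqs = t_freqs

    def forward(self, plucker: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
        # plucker: (N, 6) per-token Plücker ray map PE(p); t: frame timestamp.
        freqs = torch.exp(torch.arange(self.t_freqs)
                          * (-math.log(10000.0) / self.t_freqs))
        se = torch.cat([torch.sin(t * freqs), torch.cos(t * freqs)], dim=-1)
        return self.g_p(plucker) + self.g_t(se)  # time broadcast over tokens

emb = StateEmbedding()
e = emb(torch.randn(16 * 9, 6), torch.tensor([0.5]))  # 144 tokens per frame
print(e.shape)  # torch.Size([144, 1024])
```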
By conditioning attention on both visual features and state information, the model achieves more accurate reasoning between input and memory.", + "bbox": [ + 169, + 660, + 823, + 717 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Let $\\mathbf{X}_q\\in \\mathbb{R}^{l_q\\times d}$ denote the flattened feature map of input frames (queries), and $\\mathbf{X}_k\\in \\mathbb{R}^{l_k\\times d}$ the concatenated memory features (keys and values). We first enrich both with their corresponding state embeddings $\\mathbf{E}_q$ and $\\mathbf{E}_k$ :", + "bbox": [ + 169, + 720, + 823, + 765 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {\\mathbf {X}} _ {q} = \\mathbf {X} _ {q} + \\mathbf {E} _ {q}, \\quad \\tilde {\\mathbf {X}} _ {k} = \\mathbf {X} _ {k} + \\mathbf {E} _ {k}. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 375, + 765, + 823, + 782 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Cross-attention is then applied to retrieve relevant memory content and output updated $\\mathbf{X}^{\\prime}$ :", + "bbox": [ + 169, + 791, + 766, + 806 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {X} ^ {\\prime} = \\operatorname {C r o s s A t t n} (Q = p _ {q} (\\tilde {\\mathbf {X}} _ {q}), K = p _ {k} (\\tilde {\\mathbf {X}} _ {k}), V = p _ {v} (\\mathbf {X} _ {k})), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 294, + 811, + 823, + 830 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $p_q, p_k$ , and $p_v$ are learnable projections.", + "bbox": [ + 169, + 835, + 478, + 851 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To simplify the reasoning space, we adopt a relative state formulation. For each query frame, the state is set to a zero reference (e.g., the pose is reset to the identity and the timestamp to zero), while the states of key frames are normalized to relative values. This design, illustrated in Figure 2(d), improves alignment under viewpoint changes and simplifies the learning objective.", + "bbox": [ + 169, + 854, + 825, + 912 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 503, + 946 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/55568ec2d9052a84d2f43f5fd983fa65c403765847b1eb321dd4a5371fac8f43.jpg", + "image_caption": [ + "Figure 4: Within context window evaluation. The motion sequence involves turning right and returning to the original position, showing self-contained consistency." + ], + "image_footnote": [], + "bbox": [ + 176, + 95, + 486, + 239 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/bdee5425e500778b85a96d164d5855b2c6ff216180af2cfc50a7e625931f6040.jpg", + "table_caption": [ + "Table 1: Evaluation on Minecraft" + ], + "table_footnote": [], + "table_body": "
Within context window
MethodsPSNR ↑LPIPS ↓rFID ↓
Full Seq.14.350.069113.87
DF20.560.009413.88
Ours21.010.007213.73
Beyond context window
MethodsPSNR ↑LPIPS ↓rFID ↓
Full Seq.///
DF18.040.437651.28
Ours19.320.142915.37
", + "bbox": [ + 153, + 329, + 426, + 498 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/d1e6c276910048364854297556611dd2a45a1eea429cab3c84376a57362243d5.jpg", + "image_caption": [ + "Figure 5: Beyond context window evaluation. Diffusion-Forcing suffers inconsistency over time, while ours maintains quality and recovers past scenes." + ], + "image_footnote": [], + "bbox": [ + 511, + 93, + 820, + 236 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/a2b0f1636dc3ff7c8dfc235adca8ea0a713423b114cd97c50c1377020f680216.jpg", + "table_caption": [ + "Table 2: Ablation on embedding designs" + ], + "table_footnote": [], + "table_body": "
Pose typeEmbed. typePSNR ↑LPIPS ↓rFID ↓
SparseAbsolute14.670.288739.23
DenseAbsolute17.630.183029.34
DenseRelative19.320.142915.37
", + "bbox": [ + 447, + 329, + 820, + 397 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/5be7f40ca0170cf0931349adf148b2063a4d2a61d782d7c6200d3dc6a412a8d7.jpg", + "table_caption": [ + "Table 3: Ablation on memory retrieve strategy" + ], + "table_footnote": [], + "table_body": "
StrategyPSNR ↑LPIPS ↓rFID ↓
Random12.320.322447.35
+ Confidence Filter17.120.186324.33
+ Similarity Filter19.320.142915.37
", + "bbox": [ + 447, + 430, + 782, + 497 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Incorporating memory into pipeline. We incorporate memory frames into the pipeline by treating them as clean inputs during both training and inference. As shown in Figure 2 (a-b), during training, memory frames are assigned the lowest noise level $k_{\\mathrm{min}}$ , while context window frames receive independently sampled noise levels from the range $[k_{\\mathrm{min}}, k_{\\mathrm{max}}]$ . During inference, both memory and context frames are assigned $k_{\\mathrm{min}}$ , while the current generating frames are assigned $k_{\\mathrm{max}}$ .", + "bbox": [ + 169, + 523, + 823, + 594 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To restrict memory influence only to memory blocks, we apply a temporal attention mask:", + "bbox": [ + 169, + 599, + 764, + 616 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nA _ {\\text {m a s k}} (i, j) = \\left\\{ \\begin{array}{l l} 1, & i \\leq L _ {M} \\text {a n d} j = i \\\\ 1, & i > L _ {M} \\text {a n d} j \\leq i \\\\ 0, & \\text {o t h e r w i s e} \\end{array} \\right. \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 366, + 622, + 825, + 672 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $L_{M}$ is the number of memory frames that are appended before frames within the context window. This guarantees causal attention while preventing memory units from affecting each other.", + "bbox": [ + 169, + 680, + 823, + 709 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 171, + 728, + 313, + 744 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Datasets. We use MineDojo (Fan et al., 2022) to create diverse training and evaluation datasets in Minecraft, configuring diverse environments (e.g., plains, savannas, ice plains, and deserts), agent actions, and interactions. For real-world scenes, we utilize RealEstate10K (Zhou et al., 2018) with camera pose annotations to evaluate long-term world consistency.", + "bbox": [ + 169, + 758, + 823, + 816 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Metrics. For quantitative evaluation, we employ reconstruction metrics, where the method of obtaining ground truth (GT) varies by specific settings. We then assess the consistency and quality of the generated videos using PSNR, LPIPS (Zhang et al., 2018), and reconstruction FID (rFID) (Heusel et al., 2017), which collectively measure pixel-level fidelity, perceptual similarity, and overall realism.", + "bbox": [ + 169, + 820, + 826, + 878 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Experimental details. For our experiments on Minecraft (Fan et al., 2022), we utilize the Oasis (Decart et al., 2024) as the base model. Our model is trained using the Adam optimizer with a fixed", + "bbox": [ + 169, + 882, + 825, + 912 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/80be7710b7aac22f2f910ef78e2582ba42b65a4d9eacce9bebbb6f7e2b7ed9dd.jpg", + "image_caption": [ + "Figure 6: Results on RealEstate (Zhou et al., 2018). We visualize loop closure consistency over a full camera rotation. The visual similarity between the first and last frames serves as a qualitative indicator of 3D spatial consistency." 
+ ], + "image_footnote": [], + "bbox": [ + 181, + 90, + 493, + 250 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ddc2586f3345e661f33248b6cc0087c4703090e0712fed894a0128507250e4b5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 503, + 92, + 810, + 250 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/aae9a2f9f9ff8ee0fcb633f9a1bd4ffb65580a3167cf48d007a602089fab10cb.jpg", + "table_caption": [ + "Table 4: Evaluation on RealEstate10K" + ], + "table_footnote": [], + "table_body": "
MethodsPSNR ↑LPIPS ↓rFID ↓
CameraCtrl (He et al., 2024)13.190.3328133.81
TrajAttn (Xiao et al., 2024)14.220.3698128.36
Viewcrafter (Yu et al., 2024c)21.720.172958.43
DFoT (Song et al., 2025)16.420.2933110.34
Ours23.340.167243.14
", + "bbox": [ + 300, + 321, + 692, + 415 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "learning rate of $2 \\times 10^{-5}$ . Training is conducted at a resolution of $640 \\times 360$ , where frames are first encoded into a latent space via a VAE at a resolution of $32 \\times 18$ , then further patchified to $16 \\times 9$ . Our training dataset comprises approximately 12K long videos, each containing 1500 frames, generated from Fan et al. (2022). During training, we employ an 8-frame temporal context window alongside an 8-frame memory window. The model is trained for approximately 500K steps using 4 GPUs, with a batch size of 4 per GPU. For the hyperparameters specified in Algorithm 1 of the main paper, we set the similarity threshold $tr$ to 0.9, $w_{o}$ to 1, and $w_{t}$ to $0.2 / t_{c}$ . For the noise levels in Eq. (5) and Eq. (6), we set $k_{\\min}$ to 15 and $k_{\\max}$ to 1000.", + "bbox": [ + 169, + 425, + 826, + 537 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "For our experiments on RealEstate10K (Zhou et al., 2018), we adopt DFoT (Song et al., 2025) as the base model. The RealEstate10K dataset provides a training set of approximately 65K short video clips. Training is conducted at a resolution of $256 \\times 256$ , with frames patched to $128 \\times 128$ . The model is trained for approximately 50K steps using 4 GPUs, with a batch size of 8 per GPU.", + "bbox": [ + 169, + 542, + 823, + 599 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.1 Results on Generation Benchmark", + "text_level": 1, + "bbox": [ + 171, + 616, + 454, + 630 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Comparisons on Minecraft Benchmark. We compare our approach with a standard full-sequence (Full Seq.) training method (He et al., 2024; Wang et al., 2024) and Diffusion Forcing (DF) (Chen et al., 2025). The key differences are as follows: the full-sequence conditional diffusion transformer (Peebles and Xie, 2023) maintains the same noise level during training and inference, DF introduces different noise levels for training and inference, and our method incorporates a memory mechanism. To assess both short-term and long-term world consistency, we conduct evaluations within and beyond the context window. We evaluate both settings on 300 test videos. In the following experiments, the agent's poses are generated by the game simulator as ground truth. However, in real-world scenarios, only the action input is available, and the pose is not directly observable. In such cases, the next-frame pose can be predicted based on the previous scenes, past states, and the upcoming action. We explore this design choice in the supplementary material.", + "bbox": [ + 169, + 641, + 826, + 794 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Within context window. For this experiment, all methods use a context window of 16, while our approach additionally maintains a memory window of 8. We test on customized motion scenarios (e.g., turn left, then turn right or move forward, then backward) to assess self-contained consistency, where the ground truth consists of previously generated frames at the same positions. As shown in Table 1 and Figure 4, the full-sequence baseline suffers from inconsistencies even within its own context window. DF improves consistency by enabling greater information exchange among generated frames. 
Our memory-based approach achieves the best performance, demonstrating the effectiveness of integrating a dedicated memory mechanism.", + "bbox": [ + 169, + 800, + 826, + 912 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/9e63e34502d19e1a9660587fe1b448ca4cbb22eb6399eea90032701c88909b36.jpg", + "table_caption": [ + "Table 5: Ablation on sampling strategy for training" + ], + "table_footnote": [], + "table_body": "
Sampling strategyPSNR ↑LPIPS ↓rFID ↓
Small-range13.230.378646.55
Large-range15.110.385542.96
Progressive19.320.142915.37
", + "bbox": [ + 321, + 112, + 671, + 183 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Beyond context window. In this setting, all methods use a context window of 8 and generate 100 future frames; our method further employs a memory window of 8 while initializing a 600-frame memory bank. We compute the reconstruction error using the subsequent 100 ground truth frames after 600 frames. Full-sequence methods can not roll out that long so we exclude it. DF exhibits poor PSNR and LPIPS scores, indicating severe inconsistency with the ground truth beyond the context window. Additionally, its low rFID suggests notable quality degradation. In contrast, our memory-augmented approach consistently outperforms others across all metrics, demonstrating superior long-term consistency and quality preservation. Figure 5 further substantiates these findings.", + "bbox": [ + 169, + 190, + 826, + 303 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Figure 3 showcases WORLDMEM's capabilities. The top section demonstrates its ability to operate in a free action space across diverse environments. Given a 600-frame memory bank, our model generates 100 future frames while preserving the ground truth's actions and poses, ensuring strong world consistency. The bottom section highlights dynamic environment interaction. By using timestamps as embeddings, the model remembers environmental changes and captures natural event evolution, such as plant growth over time.", + "bbox": [ + 169, + 306, + 823, + 391 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Comparisons on Real Scenarios. We compare our method with prior works (He et al., 2024; Xiao et al., 2024; Yu et al., 2024c; Song et al., 2025) on the RealEstate10K dataset (Zhou et al., 2018). We design 5 evaluation trajectories, each starting and ending at the same pose, across 100 scenes. The trajectory lengths range from 37 to 60 frames – exceeding the training lengths of all baselines (maximum 25 frames).", + "bbox": [ + 169, + 397, + 826, + 467 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "CameraCtrl (He et al., 2024), TrajAttn (Xiao et al., 2024), and DFoT (Song et al., 2025) discard past frames and suffer from inconsistency. Viewcrafter (Yu et al., 2024c) incorporates explicit 3D reconstruction, yielding better results, but is constrained by errors in post-processing such as reconstruction and rendering. As shown in Table 4 and Figure 6, our approach achieves superior performance across all metrics. However, the RealEstate dataset inherently limits the full potential of our method, as it consists of short, non-interactive clips with limited temporal complexity. We leave evaluation under more challenging and interactive real-world scenarios for future work.", + "bbox": [ + 169, + 472, + 823, + 571 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.2 Ablation", + "text_level": 1, + "bbox": [ + 171, + 588, + 274, + 602 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "**Embedding designs.** The design of embeddings within the memory block is crucial for cross-frame relationship modeling. We evaluate three strategies (Table 2): (1) sparse pose embedding with absolute encoding, (2) dense pose embedding with absolute encoding, and (3) dense pose embedding with relative encoding. Results show that dense pose embeddings (Plücker embedding) significantly enhance all metrics, emphasizing the benefits of richer pose representations. 
Switching from absolute to relative encoding further improves performance, particularly in LPIPS and rFID, by facilitating relationship reasoning and information retrieval. As illustrated in Figure 7, absolute embeddings accumulate errors over time, while relative embeddings maintain stability even beyond 300 frames.", + "bbox": [ + 169, + 614, + 823, + 726 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Sampling strategy for training. We compare different sampling strategies during training in the Minecraft benchmark. Small-range sampling restricts memory conditioning to frames within $2\\mathrm{m}$ in the Minecraft world, while large-range sampling extends this range to $8\\mathrm{m}$ . Progressive sampling, on the other hand, begins with small-range samples for initial training steps and then gradually expands to large-range samples.", + "bbox": [ + 169, + 731, + 823, + 801 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "As shown in Table 5, both small-range and large-range sampling struggle with consistency and quality, whereas progressive sampling significantly improves all metrics. This suggests that gradually increasing difficulty during training helps the model learn to reason and effectively query information from memory blocks.", + "bbox": [ + 169, + 806, + 823, + 864 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Time condition. We ablate the effectiveness of the timestamp condition (for both embedding and retrieval) in Table 6. We curate 100 video samples featuring placing events and evaluate whether future generations align with event progression. As shown in the table, incorporating the time", + "bbox": [ + 169, + 869, + 823, + 912 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/c891de3c6e4cfb733593f322c26a2b2245c57e9ed2ba6ef2a8161dce2e9e97c1.jpg", + "image_caption": [ + "Figure 7: Long-term Generation Comparison. This figure presents the PSNR of different ablation methods compared to the ground truth over a 300-frame sequence. The results show that our method without memory blocks or using random memory retrieval exhibits immediate inconsistencies with the ground truth. Additionally, the model lacking relative embeddings begins to degrade significantly beyond 100 frames. In contrast, our full method maintains strong consistency even beyond 300 frames." + ], + "image_footnote": [], + "bbox": [ + 181, + 93, + 478, + 234 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/6d83deba8dc1fb557d72f7e206ad8763aaf1db95ce734260003174404ea4cd47.jpg", + "image_caption": [ + "Figure 8: Results w/o and w/ time condition. Without timestamps, the model fails to differentiate memory units from the same location at different times, causing errors. With time conditioning, it aligns with the updated world state, ensuring consistency." + ], + "image_footnote": [], + "bbox": [ + 511, + 99, + 821, + 176 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/30d17f42dbaa4ca8d8b12815ea604146efbf347f4ae14367a292fdb24ea2af4b.jpg", + "table_caption": [ + "Table 6: Ablation on time condition" + ], + "table_footnote": [], + "table_body": "
Time conditionPSNR ↑LPIPS ↓rFID ↓
w/o17.170.198923.89
w/19.120.161316.53
", + "bbox": [ + 511, + 328, + 821, + 383 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "condition significantly improves PSNR and LPIPS, indicating that adding temporal information helps the model faithfully reproduce event changes in world simulation. Since events like plant growth are inherently unpredictable, we do not conduct quantitative evaluations on such cases but instead provide qualitative illustrations in Figure 8.", + "bbox": [ + 169, + 411, + 823, + 468 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Memory retrieve strategy. We analyze memory retrieval strategies in Table 3. Random sampling from the memory bank leads to poor performance and severe quality degradation, as evidenced by a sharp drop in rFID and rapid divergence from the ground truth (Figure 7). The confidence-based filtering significantly enhances consistency and generation quality. Additionally, we refine retrieval by filtering out redundant memory units based on similarity, further improving all evaluation metrics and demonstrating the effectiveness of our approach.", + "bbox": [ + 169, + 474, + 823, + 556 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5 Limitations and Future works", + "text_level": 1, + "bbox": [ + 171, + 580, + 455, + 595 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Despite the effectiveness of our approach, certain issues warrant further exploration. First, we cannot guarantee that we can always retrieve all necessary information from the memory bank In some corner cases (e.g., when views are blocked by obstacles), relying solely on view overlap may be insufficient. Second, our current interaction with the environment lacks diversity and realism. In future work, we plan to extend our models to real-world scenarios with more realistic and varied interactions. Lastly, our memory design still entails linearly increasing memory usage, which may impose limitations when handling extremely long sequences.", + "bbox": [ + 169, + 613, + 823, + 710 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 171, + 733, + 299, + 750 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In conclusion, WOrLDMEM tackles the longstanding challenge of maintaining long-term consistency in world simulation by employing a memory bank of past frames and associated states. Its memory attention mechanism enables accurate reconstruction of previously observed scenes, even under large viewpoints or temporal gaps, and effectively models dynamic changes over time. Extensive experiments in both virtual and real settings confirm WOrLDMEM's capacity for robust, immersive world simulation. We hope our work will encourage further research on the design and applications of memory-based world simulators.", + "bbox": [ + 169, + 766, + 823, + 864 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Acknowledgements. This research is supported by the National Research Foundation, Singapore, under its NRF Fellowship Award . 
This research is also supported by NTU SUG-NAP, as well as cash and in-kind funding from NTU S-Lab and industry partner(s).", + "bbox": [ + 169, + 869, + 826, + 912 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 173, + 89, + 269, + 106 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Eloi Alonso, Adam Jelley, Vincent Micheli, Anssi Kanervisto, Amos J Storkey, Tim Pearce, and François Fleuret. Diffusion for world modeling: Visual details matter in atari. Advances in Neural Information Processing Systems, 37:58757-58791, 2025.", + "Amir Bar, Gaoyue Zhou, Danny Tran, Trevor Darrell, and Yann LeCun. Navigation world models, 2024.", + "Charles Beattie, Joel Z Leibo, Denis Teplyashin, Tom Ward, Marcus Wainwright, Heinrich Kuttler, Andrew Lefrancq, Simon Green, Víctor Valdés, Amir Sadik, et al. Deepmind lab. arXiv preprint arXiv:1612.03801, 2016.", + "Boyuan Chen, Diego Martí Monsó, Yilun Du, Max Simchowitz, Russ Tedrake, and Vincent Sitzmann. Diffusion forcing: Next-token prediction meets full-sequence diffusion. Advances in Neural Information Processing Systems, 37:24081-24125, 2025.", + "Haoxin Chen, Menghan Xia, Yingqing He, Yong Zhang, Xiaodong Cun, Shaoshu Yang, Jinbo Xing, Yaofang Liu, Qifeng Chen, Xintao Wang, et al. Videocraft1: Open diffusion models for high-quality video generation. arXiv preprint arXiv:2310.19512, 2023.", + "Decart, Julian Quevedo, Quinn McIntyre, Spruce Campbell, Xinlei Chen, and Robert Wachen. Oasis: A universe in a transformer. 2024. Project website.", + "Linxi Fan, Guanzhi Wang, Yunfan Jiang, Ajay Mandlekar, Yuncong Yang, Haoyi Zhu, Andrew Tang, DeAn Huang, Yuke Zhu, and Anima Anandkumar. Minedojo: Building open-ended embodied agents with internet-scale knowledge. Advances in Neural Information Processing Systems, 35:18343-18362, 2022.", + "Ruili Feng, Han Zhang, Zhantao Yang, Jie Xiao, Zhilei Shu, Zhiheng Liu, Andy Zheng, Yukun Huang, Yu Liu, and Hongyang Zhang. The matrix: Infinite-horizon world generation with real-time moving control. arXiv preprint arXiv:2412.03568, 2024.", + "Ruiqi Gao, Aleksander Holynski, Philipp Henzler, Arthur Brussee, Ricardo Martin-Brualla, Pratul Srinivasan, Jonathan T Barron, and Ben Poole. Cat3d: Create anything in 3d with multi-view diffusion models. arXiv preprint arXiv:2405.10314, 2024.", + "Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725, 2023.", + "David Ha and Jürgen Schmidhuber. Recurrent world models facilitate policy evolution. Advances in neural information processing systems, 31, 2018a.", + "David Ha and Jürgen Schmidhuber. World models. arXiv preprint arXiv:1803.10122, 2018b.", + "Danijar Hafner, Timothy Lillicrap, Jimmy Ba, and Mohammad Norouzi. Dream to control: Learning behaviors by latent imagination. arXiv preprint arXiv:1912.01603, 2019.", + "Danijar Hafner, Timothy Lillicrap, Mohammad Norouzi, and Jimmy Ba. Mastering atari with discrete world models. arXiv preprint arXiv:2010.02193, 2020.", + "Hao He, Yinghao Xu, Yuwei Guo, Gordon Wetzstein, Bo Dai, Hongsheng Li, and Ceyuan Yang. Cameractrol: Enabling camera control for text-to-video generation. 
arXiv preprint arXiv:2404.02101, 2024.", + "Roberto Henschel, Levon Khachatryan, Daniil Hayrapetyan, Hayk Poghosyan, Vahram Tadevosyan, Zhangyang Wang, Shant Navasardyan, and Humphrey Shi. Streamingt2v: Consistent, dynamic, and extendable long video generation from text. arXiv preprint arXiv:2403.14773, 2024.", + "Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017.", + "Yining Hong, Beide Liu, Maxine Wu, Yuanhao Zhai, Kai-Wei Chang, Linjie Li, Kevin Lin, Chung-Ching Lin, Jianfeng Wang, Zhengyuan Yang, Ying Nian Wu, and Lijuan Wang Wang. Slowfast-vgen: Slow-fast learning for action-driven long video generation. arXiv preprint arXiv:2410.23277, 2024.", + "Anthony Hu, Lloyd Russell, Hudson Yeo, Zak Murez, George Fedoseev, Alex Kendall, Jamie Shotton, and Gianluca Corrado. Gaia-1: A generative world model for autonomous driving. arXiv preprint arXiv:2309.17080, 2023." + ], + "bbox": [ + 171, + 112, + 828, + 910 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuzhhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. *ICLR*, 1(2):3, 2022.", + "Hanwen Jiang, Hao Tan, Peng Wang, Haian Jin, Yue Zhao, Sai Bi, Kai Zhang, Fujun Luan, Kalyan Sunkavalli, Qixing Huang, et al. Rayzer: A self-supervised large view synthesis model. arXiv preprint arXiv:2505.00702, 2025.", + "Yang Jin, Zhicheng Sun, Ningyuan Li, Kun Xu, Hao Jiang, Nan Zhuang, Quzhe Huang, Yang Song, Yadong Mu, and Zhouchen Lin. Pyramidal flow matching for efficient video generative modeling. arXiv preprint arXiv:2410.05954, 2024.", + "Jihwan Kim, Junoh Kang, Jinyoung Choi, and Bohyung Han. FIFO-diffusion: Generating infinite videos from text without training. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024.", + "Dan Kondratyuk, Lijun Yu, Xiuye Gu, José Lezama, Jonathan Huang, Grant Schindler, Rachel Hornung, Vighnesh Birodkar, Jimmy Yan, Ming-Chang Chiu, Krishna Somandepalli, Hassan Akbari, Yair Alon, Yong Cheng, Josh Dillon, Agrim Gupta, Meera Hahn, Anja Hauth, David Hendon, Alonso Martinez, David Minnen, Mikhail Sirotenko, Kihyuk Sohn, Xuan Yang, Hartwig Adam, Ming-Hsuan Yang, Irfan Essa, Huisheng Wang, David A. Ross, Bryan Seybold, and Lu Jiang. Videopoet: A large language model for zero-shot video generation, 2024.", + "Hanwen Liang, Junli Cao, Vidit Goel, Guocheng Qian, Sergei Korolev, Demetri Terzopoulos, Konstantinos N Plataniotis, Sergey Tulyakov, and Jian Ren. Wonderland: Navigating 3d scenes from a single image. arXiv preprint arXiv:2412.12091, 2024.", + "Fangfu Liu, Wenqiang Sun, Hanyang Wang, Yikai Wang, Haowen Sun, Junliang Ye, Jun Zhang, and Yueqi Duan. Reconx: Reconstruct any scene from sparse views with video diffusion model. arXiv preprint arXiv:2408.16767, 2024.", + "Xin Ma, Yaohui Wang, Gengyun Jia, Xinyuan Chen, Ziwei Liu, Yuan-Fang Li, Cunjian Chen, and Yu Qiao. Latte: Latent diffusion transformer for video generation. arXiv preprint arXiv:2401.03048, 2024.", + "OpenAI. Video generation models as world simulators. 
https://openai.com/research/video-generation-models-as-world-simulators, 2024.", + "Jack Parker-Holder, Philip Ball, Jake Bruce, Vibhavari Dasagi, Kristian Holsheimer, Christos Kaplanis, Alexandre Moufarek, Guy Scully, Jeremy Shar, Jimmy Shi, Stephen Spencer, Jessica Yung, Michael Dennis, Sultan Kenjeyev, Shangbang Long, Vlad Mnih, Harris Chan, Maxime Gazeau, Bonnie Li, Fabio Pardo, Luyu Wang, Lei Zhang, Frederic Besse, Tim Harley, Anna Mitenkova, Jane Wang, Jeff Clune, Demis Hassabis, Raia Hadsell, Adrian Bolton, Satinder Singh, and Tim Roktaschel. Genie 2: A large-scale foundation world model. 2024.", + "William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4195-4205, 2023.", + "Tanzila Rahman, Hsin-Ying Lee, Jian Ren, Sergey Tulyakov, Shweta Mahajan, and Leonid Sigal. Make-a-story: Visual memory conditioned consistent story generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2493-2502, 2023.", + "Xuanchi Ren, Tianchang Shen, Jiahui Huang, Huan Ling, Yifan Lu, Merlin Nimier-David, Thomas Müller, Alexander Keller, Sanja Fidler, and Jun Gao. Gen3c: 3d-informed world-consistent video generation with precise camera control. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2025.", + "Mehdi SM Sajjadi, Henning Meyer, Etienne Pot, Urs Bergmann, Klaus Greff, Noha Radwan, Suhani Vora, Mario Lucic, Daniel Duckworth, Alexey Dosovitskiy, et al. Scene representation transformer: Geometry-free novel view synthesis through set-latent scene representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6229-6238, 2022.", + "Vincent Sitzmann, Semon Rezchikov, Bill Freeman, Josh Tenenbaum, and Fredo Durand. Light field networks: Neural scene representations with single-evaluation rendering. Advances in Neural Information Processing Systems, 34:19313-19325, 2021.", + "Kiwhan Song, Boyuan Chen, Max Simchowitz, Yilun Du, Russ Tedrake, and Vincent Sitzmann. History-guided video diffusion. arXiv preprint arXiv:2502.06764, 2025." + ], + "bbox": [ + 173, + 90, + 826, + 911 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456, 2020.", + "Dani Valevski, Yaniv Leviathan, Moab Arar, and Shlomi Fruchter. Diffusion models are real-time game engines. arXiv preprint arXiv:2408.14837, 2024.", + "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need, 2017.", + "Hengyi Wang and Lourdes Agapito. 3d reconstruction with spatial memory. arXiv preprint arXiv:2408.16061, 2024.", + "Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, and Shiwei Zhang. Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571, 2023a.", + "Jing Wang, Fengzhuo Zhang, Xiaoli Li, Vincent YF Tan, Tianyu Pang, Chao Du, Aixin Sun, and Zhuoran Yang. Error analyses of auto-regressive video diffusion models: A unified framework. 
arXiv preprint arXiv:2503.10704, 2025.", + "Yaohui Wang, Xinyuan Chen, Xin Ma, Shangchen Zhou, Ziqi Huang, Yi Wang, Ceyuan Yang, Yinan He, Jiashuo Yu, Peiqing Yang, et al. Lavie: High-quality video generation with cascaded latent diffusion models. arXiv preprint arXiv:2309.15103, 2023b.", + "Zhouxia Wang, Ziyang Yuan, Xintao Wang, Yaowei Li, Tianshui Chen, Menghan Xia, Ping Luo, and Ying Shan. Motionctrl: A unified and flexible motion controller for video generation. In ACM SIGGRAPH 2024 Conference Papers, pages 1-11, 2024.", + "Sibo Wu, Congrong Xu, Binbin Huang, Andreas Geiger, and Anpei Chen. Genfusion: Closing the loop between reconstruction and generation via videos. arXiv preprint arXiv:2503.21219, 2025a.", + "Tong Wu, Zhihao Fan, Xiao Liu, Yeyun Gong, Yelong Shen, Jian Jiao, Hai-Tao Zheng, Juntao Li, Zhongyu Wei, Jian Guo, Nan Duan, and Weizhu Chen. Ar-diffusion: Auto-regressive diffusion model for text generation, 2023.", + "Xindi Wu, Uriel Singer, Zhaojiang Lin, Andrea Madotto, Xide Xia, Yifan Xu, Paul Crook, Xin Luna Dong, and Seungwhan Moon. Corgi: Cached memory guided video generation. In 2025 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 4585-4594. IEEE, 2025b.", + "Zeqi Xiao, Wenqi Ouyang, Yifan Zhou, Shuai Yang, Lei Yang, Jianlou Si, and Xingang Pan. Trajectory attention for fine-grained video motion control. arXiv preprint arXiv:2411.19324, 2024.", + "Jingjing Xu, Xu Sun, Zhiyuan Zhang, Guangxiang Zhao, and Junyang Lin. Understanding and improving layer normalization. Advances in neural information processing systems, 32, 2019.", + "Mengjiao Yang, Yilun Du, Kamyar Ghasemipour, Jonathan Tompson, Dale Schuurmans, and Pieter Abbeel. Learning interactive real-world simulators. arXiv preprint arXiv:2310.06114, 1(2):6, 2023.", + "Tianwei Yin, Qiang Zhang, Richard Zhang, William T Freeman, Fredo Durand, Eli Shechtman, and Xun Huang. From slow bidirectional to fast causal video generators. arXiv preprint arXiv:2412.07772, 2024.", + "Hong-Xing Yu, Haoyi Duan, Charles Herrmann, William T Freeman, and Jiajun Wu. Wonderworld: Interactive 3d scene generation from a single image. arXiv preprint arXiv:2406.09394, 2024a.", + "Hong-Xing Yu, Haoyi Duan, Junhwa Hur, Kyle Sargent, Michael Rubinstein, William T Freeman, Forrester Cole, Deqing Sun, Noah Snavely, Jiajun Wu, et al. Wonderjourney: Going from anywhere to everywhere. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6658-6667, 2024b.", + "Jiwen Yu, Yiran Qin, Haoxuan Che, Quande Liu, Xintao Wang, Pengfei Wan, Di Zhang, Kun Gai, Hao Chen, and Xihui Liu. A survey of interactive generative video. arXiv preprint arXiv:2504.21853, 2025a.", + "Jiwen Yu, Yiran Qin, Haoxuan Che, Quande Liu, Xintao Wang, Pengfei Wan, Di Zhang, and Xihui Liu. Position: Interactive generative video as next-generation game engine. arXiv preprint arXiv:2503.17359, 2025b.", + "Jiwen Yu, Yiran Qin, Xintao Wang, Pengfei Wan, Di Zhang, and Xihui Liu. Gamefactory: Creating new games with generative interactive videos. arXiv preprint arXiv:2501.08325, 2025c." + ], + "bbox": [ + 171, + 90, + 828, + 911 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Wangbo Yu, Jinbo Xing, Li Yuan, Wenbo Hu, Xiaoyu Li, Zhipeng Huang, Xiangjun Gao, Tien-Tsin Wong, Ying Shan, and Yonghong Tian. 
Viewcrafter: Taming video diffusion models for high-fidelity novel view synthesis. arXiv preprint arXiv:2409.02048, 2024c.", + "Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018.", + "Longtao Zheng, Yifan Zhang, Hanzhong Guo, Jiachun Pan, Zhenxiong Tan, Jiahao Lu, Chuanxin Tang, Bo An, and Shuicheng Yan. Memo: Memory-guided diffusion for expressive talking video generation. arXiv preprint arXiv:2412.04448, 2024a.", + "Zangwei Zheng, Xiangyu Peng, Tianji Yang, Chenhui Shen, Shenggui Li, Hongxin Liu, Yukun Zhou, Tianyi Li, and Yang You. Open-sora: Democratizing efficient video production for all, 2024b.", + "Tinghui Zhou, Richard Tucker, John Flynn, Graham Fyffe, and Noah Snavely. Stereo magnification: Learning view synthesis using multiplane images. In SIGGRAPH, 2018." + ], + "bbox": [ + 173, + 90, + 825, + 297 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "7 Supplementary Materials", + "text_level": 1, + "bbox": [ + 171, + 89, + 419, + 107 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "7.1 Details and Experiments", + "text_level": 1, + "bbox": [ + 171, + 119, + 385, + 136 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "**Embedding designs.** We present the detailed designs of embeddings for timesteps, actions, poses, and timestamps in Figure 10, where $F, C, H, W, A$ denote the frame number, channel count, height, width, and action count, respectively.", + "bbox": [ + 169, + 146, + 826, + 189 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The input pose is parameterized by position $(x,z,y)$ and orientation (pitch $\\theta$ and yaw $\\phi$ ). The extrinsic matrix $\\mathbf{T} \\in \\mathbb{R}^{4 \\times 4}$ is formed as:", + "bbox": [ + 169, + 194, + 823, + 222 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {T} = \\left[ \\begin{array}{l l} \\mathbf {R} _ {c} & \\mathbf {c} \\\\ \\mathbf {0} ^ {T} & 1 \\end{array} \\right], \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 441, + 227, + 825, + 262 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "where $\\mathbf{c} = (x,z,y)^T$ and $\\mathbf{R}_c = \\mathbf{R}_y(\\phi)\\mathbf{R}_x(\\theta)$", + "bbox": [ + 169, + 268, + 480, + 287 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "To encode camera pose, we adopt the Plücker embedding. Given a pixel $(u,v)$ with normalized camera coordinates:", + "bbox": [ + 169, + 291, + 823, + 318 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\pi} _ {u v} = \\mathbf {K} ^ {- 1} [ u, v, 1 ] ^ {T}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 421, + 318, + 825, + 335 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "its world direction is:", + "bbox": [ + 171, + 339, + 315, + 353 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {d} _ {u v} = \\mathbf {R} _ {c} \\boldsymbol {\\pi} _ {u v} + \\mathbf {c}. 
\\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 429, + 353, + 825, + 369 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The Plücker embedding is:", + "bbox": [ + 171, + 373, + 351, + 388 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {l} _ {u v} = \\left(\\mathbf {c} \\times \\mathbf {d} _ {u v}, \\mathbf {d} _ {u v}\\right) \\in \\mathbb {R} ^ {6}. \\tag {10}\n$$\n", + "text_format": "latex", + "bbox": [ + 401, + 386, + 825, + 404 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "For a frame of size $H \\times W$ , the full embedding is:", + "bbox": [ + 169, + 406, + 506, + 422 + ], + "page_idx": 14 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {L} _ {i} \\in \\mathbb {R} ^ {H \\times W \\times 6}. \\tag {11}\n$$\n", + "text_format": "latex", + "bbox": [ + 441, + 429, + 825, + 446 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Memory context length. We evaluate how different memory context lengths affect performance in the Minecraft benchmark. Table 7 shows that increasing the context length from 1 to 8 steadily boosts PSNR, lowers LPIPS, and reduces rFID. However, extending the length to 16 deteriorates results, indicating that excessive memory frames may introduce noise or reduce retrieval precision. A context length of 8 provides the best trade-off, yielding the highest PSNR and the lowest LPIPS and rFID.", + "bbox": [ + 169, + 460, + 825, + 532 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Pose prediction. For interactive play, ground truth poses are not accessible. To address this, we designed a lightweight pose prediction module that estimates the pose of the next frame. As illustrated in Figure 9, the predictor takes the previous image, the previous pose, and the upcoming action as inputs and outputs the predicted next pose. This module enables the system to operate using actions alone, eliminating the need for ground truth poses during inference. In Table 8, we compare the performance of using predicted poses versus ground truth poses. While using ground truth poses yields better results across all metrics, the performance drop with predicted poses is acceptable. This is because our method does not rely heavily on precise pose predictions – new frames are generated based on these predictions – and the ground truth poses generated by the Minecraft simulator also contain a certain degree of randomness.", + "bbox": [ + 169, + 537, + 826, + 676 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/f0004364140102367ad4d60778876a7c4c25fdc4c20dc2f01eae89736c3023e5.jpg", + "table_caption": [ + "Table 7: Ablation on length of memory context length" + ], + "table_footnote": [], + "table_body": "
LengthPSNR ↑LPIPS ↓rFID ↓
116.180.189920.47
418.680.156816.54
819.320.142915.37
1617.140.168718.33
", + "bbox": [ + 359, + 714, + 638, + 797 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/fa911a9e797ad09c23c99f30039cff030ba8c0a685c01c115f0232972453d2aa.jpg", + "table_caption": [ + "Table 8: Comparison between using predicted poses and ground truth poses" + ], + "table_footnote": [], + "table_body": "
Pose TypePSNR ↑LPIPS ↓rFID ↓
Ground truth19.320.142915.37
Predicted17.130.178620.36
", + "bbox": [ + 339, + 844, + 658, + 901 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/43ccd54139ef24f20c1aefc610fed777c3dd8ace9ca8755f9903a916ced4749f.jpg", + "image_caption": [ + "Figure 9: Structure of pose predictor." + ], + "image_footnote": [], + "bbox": [ + 346, + 98, + 651, + 256 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/33a6d94605ecfe71ff82a31473937beb82fd235cd731a85bcb700378ff2ddd3a.jpg", + "image_caption": [ + "(a) Timestep embedding", + "(b) Action embedding" + ], + "image_footnote": [], + "bbox": [ + 346, + 310, + 643, + 468 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/87369e340a68364c85e6e43c777c9d3474916f9a9513dff25ee3cb2472787016.jpg", + "image_caption": [ + "(c) Pose embedding", + "(d) Timestamp embedding", + "Figure 10: Illustration of different embeddings." + ], + "image_footnote": [], + "bbox": [ + 348, + 518, + 643, + 686 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "7.2 Memory Usage and Scalability Analysis", + "text_level": 1, + "bbox": [ + 171, + 767, + 490, + 782 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "To assess the scalability and practical feasibility of our method, we provide detailed quantitative analysis covering memory usage, generation duration, training cost, and inference efficiency.", + "bbox": [ + 169, + 792, + 823, + 821 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Memory Usage of the Memory Bank. The memory bank is lightweight. Storing 600 visual memory tokens with shape [600, 16, 18, 32] in float32 takes approximately 21MB.", + "bbox": [ + 169, + 838, + 823, + 868 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Retrieval Latency. Below we report the average retrieval time (for 8 memory frames) as a function of memory bank size:", + "bbox": [ + 169, + 883, + 823, + 912 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/54085ce17ba039df16122eec09ce0693f531d932564155b51c6ddc1fd60662ac.jpg", + "image_caption": [ + "Figure 11: Two-view FOV overlapping visualization." + ], + "image_footnote": [], + "bbox": [ + 339, + 97, + 651, + 262 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/311186cc1d836831fefdca576808fd26c822e109d2ddfda303da4ad7c48f137b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Number of Memory CandidatesRetrieval Time (s)
100.04
1000.06
6000.10
10000.16
", + "bbox": [ + 313, + 311, + 683, + 398 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The generation cost (20 denoising steps) is $\\sim 0.9$ s per frame. Retrieval time accounts for only $10 - 20\\%$ of total inference time even with 1000 candidates.", + "bbox": [ + 169, + 411, + 823, + 439 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Comparison with Baseline. We compare our method with a baseline model (without memory), under consistent settings: 8 context frames, 8 memory frames, 20 denoising steps, and no acceleration techniques, on single H200.", + "bbox": [ + 169, + 455, + 825, + 500 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/9491f18c075d1aadae6c839aeb0789004bda41f81162625333e60688614b9348.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodTrainingInference
Mem. UsageSpeed (it/s)Mem. UsageSpeed (it/s)
w/o Memory33 GB3.199 GB1.03
with Memory51 GB1.7611 GB0.89
", + "bbox": [ + 243, + 513, + 753, + 585 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Adding memory introduces moderate training overhead. During inference, the impact is minimal: only a small increase in memory usage and a slight decrease in speed.", + "bbox": [ + 169, + 598, + 823, + 627 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Inference Optimization. With modern acceleration techniques (e.g., timestep distillation, early exit, sparse attention), inference speed can reach $\\sim 10$ FPS, making our method practical for deployment.", + "bbox": [ + 169, + 643, + 825, + 672 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "FOV Overlapping Computation. We present the details of Monte Carlo-based FOV overlapping computation in Alg. 11, as well as the two-view overlapping sampling in Figure 11.", + "bbox": [ + 169, + 676, + 823, + 708 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "7.3 Visualizations", + "text_level": 1, + "bbox": [ + 171, + 724, + 310, + 739 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In this section, we provide more visualization of different aspects to facilitate understanding.", + "bbox": [ + 169, + 752, + 779, + 768 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Minecraft Training Examples. We present a diverse set of training environments that include various terrain types, action spaces, and weather conditions, as shown in Figure 12. These variations help enhance the model's adaptability and robustness in different scenarios.", + "bbox": [ + 169, + 772, + 823, + 815 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Trajectory Examples in Minecraft. Figure 13 illustrates trajectory examples in the x-z space over 100 frames. The agent's movement exhibits a random action pattern, ensuring diverse learning objectives and a broad range of sampled experiences.", + "bbox": [ + 169, + 820, + 823, + 863 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Pose Distribution. We collect and visualize 800 samples within a sampling range of 8, as shown in Figure 14. The random pattern observed in Figure 14 ensures a diverse distribution of sampled poses in space, which is beneficial for learning the reasoning process within the memory blocks.", + "bbox": [ + 169, + 869, + 823, + 912 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Algorithm 2: Monte Carlo-based FOV Overlap Computation (Notationally Disjoint)", + "text_level": 1, + "bbox": [ + 171, + 94, + 730, + 109 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Input:", + "text_level": 1, + "bbox": [ + 173, + 112, + 220, + 126 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- $Q_{\\mathrm{ref}} \\in \\mathbb{R}^{F \\times 5}$ : reference poses from memory bank (x,y,z,pitch,yaw), $F$ is the number of stored poses.", + "- $Q_{\\mathrm{tgt}} \\in \\mathbb{R}^5$ : pose of the current (target) frame.", + "- $M$ : number of 3D sample points (default 10,000).", + "- $R$ : radius of the sampling sphere (default $30\\mathrm{m}$ ).", + "- $\\phi_h$ , $\\phi_v$ : horizontal/vertical field-of-view angles (in degrees)." 
+ ], + "bbox": [ + 215, + 125, + 810, + 229 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Output:", + "text_level": 1, + "bbox": [ + 171, + 234, + 233, + 248 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "- $\\rho \\in \\mathbb{R}^F$ : overlapping ratios between each reference pose and the target pose.", + "bbox": [ + 215, + 251, + 735, + 268 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "begin", + "text_level": 1, + "bbox": [ + 171, + 272, + 215, + 285 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "$\\Delta$ Step 1: Random Sampling in a Sphere", + "text_level": 1, + "bbox": [ + 196, + 284, + 483, + 297 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Generate $M$ points $\\mathbf{q}$ uniformly in a 3D sphere of radius $R$ :", + "bbox": [ + 196, + 297, + 589, + 311 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf{q} \\leftarrow \\text{PointSampling}(M, R).\n$$\n", + "text_format": "latex", + "bbox": [ + 395, + 319, + 607, + 335 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "$\\Delta$ Step 2: Translate Points to $Q_{\\mathrm{tgt}}$ as Center", + "text_level": 1, + "bbox": [ + 194, + 342, + 508, + 358 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Let $Q_{\\mathrm{tgt}}(x,y,z)$ be the 3D coordinates of the current camera pose. Shift all sampled points:", + "bbox": [ + 194, + 357, + 800, + 372 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf{q} \\leftarrow \\mathbf{q} + Q_{\\mathrm{tgt}}(x, y, z).\n$$\n", + "text_format": "latex", + "bbox": [ + 416, + 378, + 584, + 395 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "$\\Delta$ Step 3: FOV Checks", + "text_level": 1, + "bbox": [ + 196, + 401, + 359, + 416 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Compute a boolean matrix $\\mathbf{v}_{\\mathrm{ref}} \\in \\{0,1\\}^{F \\times M}$ , where each entry indicates if a point in $\\mathbf{q}$ lies in the FOV of a reference pose:", + "bbox": [ + 194, + 414, + 800, + 445 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf{v}_{\\mathrm{ref}} \\leftarrow \\operatorname{IsInsideFOV}\\big(\\mathbf{q}, Q_{\\mathrm{ref}}, \\phi_h, \\phi_v\\big).\n$$\n", + "text_format": "latex", + "bbox": [ + 362, + 450, + 640, + 478 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Similarly, compute a boolean vector $\\mathbf{v}_{\\mathrm{tgt}} \\in \\{0,1\\}^{M}$ for the target pose:", + "bbox": [ + 204, + 484, + 676, + 502 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf{v}_{\\mathrm{tgt}} \\leftarrow \\operatorname{IsInsideFOV}\\big(\\mathbf{q}, Q_{\\mathrm{tgt}}, \\phi_h, \\phi_v\\big).\n$$\n", + "text_format": "latex", + "bbox": [ + 362, + 508, + 640, + 535 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "$\\Delta$ Step 4: Overlapping Ratio Computation", + "text_level": 1, + "bbox": [ + 196, + 540, + 496, + 555 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Obtain the final overlapping ratio vector $\\pmb{\\rho}\\in \\mathbb{R}^{F}$ by combining $\\mathbf{v}_{\\mathrm{ref}}$ and $\\mathbf{v}_{\\mathrm{tgt}}$ .
For instance,", + "bbox": [ + 196, + 554, + 795, + 570 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {\\rho} [ i ] = \\frac {1}{M} \\sum_ {j = 1} ^ {M} \\left(\\mathbf {v} _ {\\mathrm {r e f}} [ i, j ] \\cdot \\mathbf {v} _ {\\mathrm {t g t}} [ j ]\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 578, + 622, + 621 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "to measure the fraction of sampled points that are visible in both the $i$ -th reference pose and the target pose.", + "bbox": [ + 202, + 627, + 800, + 656 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Return $\\rho$", + "bbox": [ + 197, + 656, + 269, + 670 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "end", + "text_level": 1, + "bbox": [ + 171, + 669, + 202, + 681 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "More Qualitative Results. For additional qualitative examples, we recommend consulting the attached web page, which offers enhanced visualizations.", + "bbox": [ + 169, + 717, + 823, + 746 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/7260ec179d4330f3a596be59f60ebb624909fec6a3bdbace805bf1f660641908.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 179, + 119, + 336, + 189 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/9e7db2f2124ac20efa2edca5ec9e00b0b6d99dcd36451b763e6533c66aad42f2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 119, + 496, + 189 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/9e73712af021410a3cc6091b7f271192d0761ef5e0bbac919792a0ed2bc5e942.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 119, + 658, + 189 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/b4112e672e093bd908e0d8b47f478e0720181eb791bfc4170bf71a494e2cad04.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 119, + 818, + 189 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/82b66f7e4a39cf80e04885bbb128c8ee9424241e8ae642b1dd992428acd71103.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 179, + 191, + 336, + 261 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/b2fb6711c620d4ecd7ae763026c5f9b183d24737649a7f6c09253b2753625d9a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 191, + 496, + 261 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/9208587645d921b10c68d516401d5b030a2fcaee02b18f07e376e379a667fee0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 191, + 658, + 261 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/1554543ce78d3b089e0c1e1756fb0bc850201acde2f34528a5b9ac40bfc5306d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 191, + 818, + 261 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/13ddfe8d2bb188aff88dfc8e860cf494addddb6a22e5b2f076a89b7033c0e483.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 179, + 263, + 336, + 332 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/e45872d86a46e6fd48f3db8a029253a71ef4ec766b935af977cd99b1b0592ac2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 263, + 496, + 332 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": 
"images/6570f481c382fe021ed13888148b604c0b603bb4d891f79cec1de0d0a488be05.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 263, + 658, + 332 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/c559b904ce54ed8b960ef64d960db2b2626c58caca3d6c512ff9ee87f3d438a7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 263, + 818, + 332 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/e7a0d754531749230bbd4a9a05832bffb34d64393251380876fd18cad297f056.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 179, + 334, + 336, + 402 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/e21770768b7f5933c8a6579cb05bb882d2db0cd3a528b963e76b28e85fc3f88e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 334, + 496, + 402 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/3dae4109068741e9358f878bbc3dc4031d853cea0cc5c6c8a1494fcebea62548.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 334, + 658, + 402 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/56325b6b2dbc279e3cdfcac989057aba84971aad1af70291e761a6af0e60f513.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 334, + 818, + 402 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/d885c7d407ce44ff8633d17891065c18fb770ac9a2dbdb8904c89b0abb280874.jpg", + "image_caption": [ + "Figure 12: Training Examples. Our training environments encompass diverse terrains, action spaces, and weather conditions, providing a comprehensive setting for learning." + ], + "image_footnote": [], + "bbox": [ + 179, + 406, + 336, + 474 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/9759da0bae6bff88da79c18c7517e84bdbc403c95500c5810822ec675e10eb60.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 406, + 496, + 474 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/ef99e53069789de1c72b40dec9daf83482c8b1d58b900b04b7b673a7536cdbeb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 406, + 658, + 474 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/b7b3bfbd5ce5428451351fb16ec099c6e94af83843c985d33a1808711993472e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 405, + 818, + 474 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/f69aaeecb41bae0b361ddaf6325948bcdd310d6207adc846da6a8605dba8f003.jpg", + "image_caption": [ + "Figure 13: Visualization of Trajectory Examples in the X-Z Space. The axis scales represent distances within the Minecraft environment." + ], + "image_footnote": [], + "bbox": [ + 274, + 580, + 718, + 840 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/57ab4223a27e3897885abbdfe8c890272849a9e1349679df129a8d7cc0014606.jpg", + "image_caption": [ + "Figure 14: Visualization of Relative Pose Distribution for Training in X-Z Space. Red dots indicate positions, while yellow arrows represent directions." 
+ ], + "image_footnote": [], + "bbox": [ + 274, + 316, + 709, + 638 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 19 + } +] \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12369/3a2c027d-7926-4954-8a0d-d286f9d6a3ea_model.json b/data/2025/2504_12xxx/2504.12369/3a2c027d-7926-4954-8a0d-d286f9d6a3ea_model.json new file mode 100644 index 0000000000000000000000000000000000000000..51ac03335e6cbd2e11fee4c89905a412d1e801f7 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/3a2c027d-7926-4954-8a0d-d286f9d6a3ea_model.json @@ -0,0 +1,3584 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.282, + 0.058, + 0.716 + ], + "angle": 270, + "content": "arXiv:2504.12369v2 [cs.CV] 2 Dec 2025" + }, + { + "type": "title", + "bbox": [ + 0.274, + 0.123, + 0.726, + 0.175 + ], + "angle": 0, + "content": "WORLDMEM: Long-term Consistent World Simulation with Memory" + }, + { + "type": "text", + "bbox": [ + 0.295, + 0.225, + 0.7, + 0.256 + ], + "angle": 0, + "content": "Zeqi Xiao\\(^{1}\\) Yushi Lan\\(^{1}\\) Yifan Zhou\\(^{1}\\) Wenqi Ouyang\\(^{1}\\) Shuai Yang\\(^{2}\\) Yanhong Zeng\\(^{3}\\) Xingang Pan\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.356, + 0.258, + 0.644, + 0.274 + ], + "angle": 0, + "content": "\\(^{1}\\)S-Lab, Nanyang Technological University," + }, + { + "type": "text", + "bbox": [ + 0.284, + 0.274, + 0.713, + 0.289 + ], + "angle": 0, + "content": "\\(^{2}\\)Wangxuan Institute of Computer Technology, Peking University" + }, + { + "type": "text", + "bbox": [ + 0.415, + 0.289, + 0.584, + 0.303 + ], + "angle": 0, + "content": "3Shanghai AI Laboratory" + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.303, + 0.758, + 0.317 + ], + "angle": 0, + "content": "{zeqi001, yushi001, yifan006, wenqi.ouyang, xingang.pan}@ntu.edu.sg" + }, + { + "type": "text", + "bbox": [ + 0.329, + 0.318, + 0.67, + 0.33 + ], + "angle": 0, + "content": "williamyang@pku.edu.cn, zengyh1900@gmail.com" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.366, + 0.538, + 0.381 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.397, + 0.769, + 0.592 + ], + "angle": 0, + "content": "World simulation has gained increasing popularity due to its ability to model virtual environments and predict the consequences of actions. However, the limited temporal context window often leads to failures in maintaining long-term consistency, particularly in preserving 3D spatial consistency. In this work, we present WOrLD-MEM, a framework that enhances scene generation with a memory bank consisting of memory units that store memory frames and states (e.g., poses and timestamps). By employing state-aware memory attention that effectively extracts relevant information from these memory frames based on their states, our method is capable of accurately reconstructing previously observed scenes, even under significant viewpoint or temporal gaps. Furthermore, by incorporating timestamps into the states, our framework not only models a static world but also captures its dynamic evolution over time, enabling both perception and interaction within the simulated world. Extensive experiments in both virtual and real scenarios validate the effectiveness of our approach. Project page at https://xizaoqu.github.io/worldmem." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.622, + 0.314, + 0.637 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.653, + 0.828, + 0.764 + ], + "angle": 0, + "content": "World simulation has gained significant attention for its ability to model environments and predict the outcomes of actions (Bar et al., 2024; Decart et al., 2024; Alonso et al., 2025; Feng et al., 2024; Parker-Holder et al., 2024; Valevski et al., 2024). Recent advances in video diffusion models have further propelled this field, enabling high-fidelity rollouts of potential future scenarios based on user actions, such as navigating through an environment or interacting with objects. These capabilities make world simulators particularly promising for applications in autonomous navigation (Feng et al., 2024; Bar et al., 2024) and as viable alternatives to traditional game engines (Decart et al., 2024; Parker-Holder et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.77, + 0.825, + 0.868 + ], + "angle": 0, + "content": "Despite these advances, a fundamental challenge remains: the limited probing horizon. Due to computational and memory constraints, video generative models operate within a fixed context window and are unable to condition on the full sequence of past generations. Consequently, most existing methods simply discard previously generated content, leading to a critical issue of world inconsistency, which is also revealed in Wang et al. (2025). As illustrated in Figure 1(a), when the camera moves away and returns, the regenerated content diverges from the earlier scene, violating the coherence expected in a consistent world." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.874, + 0.825, + 0.903 + ], + "angle": 0, + "content": "A natural solution is to maintain an external memory that stores and retrieves relevant historical information outside the generative loop. While intuitive, formulating such a memory mechanism is" + }, + { + "type": "footer", + "bbox": [ + 0.172, + 0.923, + 0.63, + 0.937 + ], + "angle": 0, + "content": "39th Conference on Neural Information Processing Systems (NeurIPS 2025)." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.176, + 0.09, + 0.827, + 0.356 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.365, + 0.828, + 0.476 + ], + "angle": 0, + "content": "Figure 1: WORLDMEM enables long-term consistent world generation with an integrated memory mechanism. (a) Previous world generation methods typically face the problem of inconsistent world due to limited temporal context window size. (b) WORLDMEM empowers the agent to explore diverse and consistent worlds with an expansive action space, e.g., crafting environments by placing objects like pumpkin light or freely roaming around. Most importantly, after exploring for a while and glancing back, we find the objects we placed are still there, with the inspiring sight of the light melting the surrounding snow, testifying to the passage of time. Red and green boxes indicate scenes that should be consistent." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.492, + 0.825, + 0.534 + ], + "angle": 0, + "content": "non-trivial. A direct approach might involve explicit 3D scene reconstruction to preserve geometry and detail. However, 3D representations are inflexible in dynamic and evolving environments and are prone to loss of detail, especially for large, unbounded scenes (Wu et al., 2025a)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.54, + 0.827, + 0.679 + ], + "angle": 0, + "content": "Instead, we argue that geometry-free representations offer a more flexible solution. These representations, however, pose their own challenges – particularly in balancing detail retention with memory scalability. For example, implicit approaches like storing abstract features via LoRA modules (Hong et al., 2024) offer compactness but lose visual fidelity and spatial specificity. Some recent works represent visual scenes as discrete tokens encoding fine-grained visual information (Sajjadi et al., 2022; Jiang et al., 2025), but they are limited by a fixed token budget and struggle to capture the complexity of diverse and evolving environments. To address this issue, we observe that for generating the immediate future, only a small subset of historical content is typically relevant. Based on this, we propose a token-level memory bank that stores all previously generated latent tokens, and retrieves a targeted subset for each generation step based on relevance." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.684, + 0.827, + 0.824 + ], + "angle": 0, + "content": "Conditioning on the retrieved memory requires spatial-temporal reasoning. In contrast to prior work where memory aids local temporal smoothness (Zheng et al., 2024a) or semantic coherence (Wu et al., 2025b; Rahman et al., 2023), long-term world simulation demands reasoning over large spatiotemporal gaps, e.g., memory and query may differ in viewpoint and time, while exact scene details must be retained. To facilitate this reasoning, we propose augmenting each memory unit with explicit state cues, including spatial location, viewpoint, and timestamp. These cues serve as anchors for reasoning and are embedded as part of the query-key attention mechanism. Through this state-aware attention, our model can effectively reason about the current frame with past observations, facilitating accurate and coherent generation. Importantly, such a design leverages standard attention architectures, enabling it to scale naturally with modern hardware and model capacity." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.829, + 0.828, + 0.913 + ], + "angle": 0, + "content": "Motivated by this idea, we build our approach, WORLDMEM, on top of the Conditional Diffusion Transformer (CDiT) (Peebles and Xie, 2023) and the Diffusion Forcing (DF) paradigm (Chen et al., 2025), which autoregressively generates first-person viewpoints conditioned on external action signals. As discussed above, at the core of WORLDMEM is a memory mechanism composed of a memory bank and memory attention. To ensure efficient and relevant memory retrieval from the bank, we introduce a confidence-based selection strategy that scores memory units based on field-of-view" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.176 + ], + "angle": 0, + "content": "(FOV) overlap and temporal proximity. In the memory attention, the latent tokens being generated act as queries, attending to the memory tokens (as keys and values) to incorporate relevant historical context. To ensure robust correspondence across varying viewpoints and time gaps, we enrich both queries and keys with state-aware embeddings. A relative embedding design is introduced to ease the learning of spatial and temporal relationships.
This pipeline enables precise, scalable reasoning over long-range memory, ensuring consistency in dynamic and evolving world simulations." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.182, + 0.828, + 0.306 + ], + "angle": 0, + "content": "We evaluate WOrLDMEM on a customized Minecraft benchmark (Fan et al., 2022) and on RealEstate10K (Zhou et al., 2018). The Minecraft benchmark includes diverse terrains (e.g., plains, savannas, and deserts) and various action modalities (movement, viewpoint control, and event triggers), which is a wonderful environment for idea verification. Extensive experiments show that WOrLDMEM significantly improves 3D spatial consistency, enabling robust viewpoint reasoning and high-fidelity scene generation, as shown in Figure 1(b). Furthermore, in dynamic environments, WOrLDMEM accurately tracks and follows evolving events and environment changes, demonstrating its ability to both perceive and interact with the generated world. We hope our promising results and scalable designs will inspire future research on memory-based world simulation." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.34, + 0.325, + 0.357 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.38, + 0.828, + 0.533 + ], + "angle": 0, + "content": "Video diffusion model. With the rapid advancement of diffusion models (Song et al., 2020; Peebles and Xie, 2023; Chen et al., 2025), video generation has made significant strides (Wang et al., 2023a,b; Chen et al., 2023; Guo et al., 2023; OpenAI, 2024; Jin et al., 2024; Yin et al., 2024). The field has evolved from traditional U-Net-based architectures (Wang et al., 2023a; Chen et al., 2023; Guo et al., 2023) to Transformer-based frameworks (OpenAI, 2024; Ma et al., 2024; Zheng et al., 2024b), enabling video diffusion models to generate highly realistic and temporally coherent videos. Recently, autoregressive video generation (Chen et al., 2025; Kim et al., 2024; Henschel et al., 2024) has emerged as a promising approach to extend video length, theoretically indefinitely. Notably, Diffusion Forcing (Chen et al., 2025) introduces a per-frame noise-level denoising paradigm. Unlike the full-sequence paradigm, which applies a uniform noise level across all frames, per-frame noise-level denoising offers a more flexible approach, enabling autoregressive generation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.54, + 0.828, + 0.663 + ], + "angle": 0, + "content": "Interactive world simulation. World simulation aims to model an environment by predicting the next state given the current state and action. This concept has been extensively explored in the construction of world models (Ha and Schmidhuber, 2018b) for agent learning (Ha and Schmidhuber, 2018a; Hafner et al., 2019, 2020; Hu et al., 2023; Beattie et al., 2016; Yang et al., 2023). With advances in video generation, high-quality world simulation with robust control has become feasible, leading to numerous works focusing on interactive world simulation (Bar et al., 2024; Decart et al., 2024; Alonso et al., 2025; Feng et al., 2024; Parker-Holder et al., 2024; Valevski et al., 2024; Yu et al., 2025c,a,b). These approaches enable agents to navigate generated environments and interact with them based on external commands." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.671, + 0.827, + 0.699 + ], + "angle": 0, + "content": "However, due to context window limitations, such methods discard previously generated content, leading to inconsistencies in the simulated world, particularly in maintaining 3D spatial coherence." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.705, + 0.828, + 0.912 + ], + "angle": 0, + "content": "Consistent world simulation. Ensuring the consistency of a generated world is crucial for effective world simulation Wang et al. (2025). Existing approaches can be broadly categorized into two types: geometric-based and geometric-free. The geometric-based methods explicitly reconstruct the generated world into a 3D/4D representation (Liu et al., 2024; Gao et al., 2024; Wang and Agapito, 2024; Ren et al., 2025; Yu et al., 2024b,a; Liang et al., 2024). While this strategy can reliably maintain consistency, it imposes strict constraints on flexibility: Once the world is reconstructed, modifying or interacting with it becomes challenging. Geometric-free methods focus on implicit learning. Methods like Alonso et al. (2025); Valevski et al. (2024) ensure consistency by overfitting to predefined scenarios (e.g., specific CS:GO or DOOM maps), limiting scalability. StreamingT2V (Henschel et al., 2024) maintains long-term consistency by continuing on both global and local visual contexts from previous frames, while SlowFastGen (Hong et al., 2024) progressively trains LoRA (Hu et al., 2022) modules for memory recall. However, these methods rely on abstract representations, making accurate scene reconstruction challenging. In contrast, our approach retrieves information from previously generated frames and their states, ensuring world consistency without overfitting to specific scenarios." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.189, + 0.098, + 0.62, + 0.195 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.111, + 0.77, + 0.17 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.685, + 0.181, + 0.778, + 0.193 + ], + "angle": 0, + "content": "(b) Input Difference" + }, + { + "type": "image", + "bbox": [ + 0.191, + 0.206, + 0.37, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.236, + 0.33, + 0.331, + 0.342 + ], + "angle": 0, + "content": "(c) State Embedding" + }, + { + "type": "image", + "bbox": [ + 0.423, + 0.204, + 0.807, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.575, + 0.33, + 0.662, + 0.341 + ], + "angle": 0, + "content": "(d) Memory Block" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.352, + 0.825, + 0.422 + ], + "angle": 0, + "content": "Figure 2: Comprehensive overview of WOrLDMEM. The framework comprises a conditional diffusion transformer integrated with memory blocks, with a dedicated memory bank storing memory units from previously generated content. By retrieving these memory units from the memory bank and incorporating the information by memory blocks to guide generation, our approach ensures long-term consistency in world simulation." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.434, + 0.321, + 0.449 + ], + "angle": 0, + "content": "3 WORLDMEM" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.464, + 0.825, + 0.507 + ], + "angle": 0, + "content": "This section details the methodology of WOrLDMEM. Sec. 3.1 introduces the relevant preliminaries, while Sec. 3.2 describes the interactive world simulator serving as our baseline. Sec. 3.3 and 3.4 present the core of our proposed memory mechanism." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.522, + 0.296, + 0.537 + ], + "angle": 0, + "content": "3.1 Preliminary" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.547, + 0.825, + 0.576 + ], + "angle": 0, + "content": "Video diffusion models. Video diffusion models generate video sequences by iteratively denoising Gaussian noise through a learned reverse process:" + }, + { + "type": "equation", + "bbox": [ + 0.355, + 0.579, + 0.825, + 0.598 + ], + "angle": 0, + "content": "\\[\np _ {\\theta} \\left(\\mathbf {x} _ {t} ^ {k - 1} \\mid \\mathbf {x} _ {t} ^ {k}\\right) = \\mathcal {N} \\left(\\mathbf {x} _ {t} ^ {k - 1}; \\mu_ {\\theta} \\left(\\mathbf {x} _ {t} ^ {k}, k\\right), \\sigma_ {k} ^ {2} \\mathbf {I}\\right), \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.601, + 0.825, + 0.645 + ], + "angle": 0, + "content": "where all frames \\((\\mathbf{x}_t^k)_{1\\leq t\\leq T}\\) share the same noise level \\(k\\) and \\(T\\) is the context window length. This full-sequence approach enables global guidance but lacks flexibility in sequence length and autoregressive generation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.651, + 0.825, + 0.707 + ], + "angle": 0, + "content": "Autoregressive video generation. Autoregressive video generation aims to extend videos over the long term by predicting frames sequentially (Kondratyuk et al., 2024; Wu et al., 2023). While various methods exist for autoregressive generation, Diffusion Forcing (DF) (Chen et al., 2025) provides a neat and effective approach to achieve this. Specifically, DF introduces per-frame noise levels \\( k_{t} \\):" + }, + { + "type": "equation", + "bbox": [ + 0.34, + 0.71, + 0.825, + 0.731 + ], + "angle": 0, + "content": "\\[\np _ {\\theta} \\left(\\mathbf {x} _ {t} ^ {k _ {t} - 1} \\mid \\mathbf {x} _ {t} ^ {k _ {t}}\\right) = \\mathcal {N} \\left(\\mathbf {x} _ {t} ^ {k _ {t} - 1}; \\mu_ {\\theta} \\left(\\mathbf {x} _ {t} ^ {k _ {t}}, k _ {t}\\right), \\sigma_ {k _ {t}} ^ {2} \\mathbf {I}\\right), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.739, + 0.825, + 0.781 + ], + "angle": 0, + "content": "Unlike full-sequence diffusion, DF generates video flexibly and stably beyond the training horizon. Autoregressive generation is a special case when only the last one or a few frames are noisy. With autoregressive video generation, long-term interactive world simulation becomes feasible." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.796, + 0.418, + 0.81 + ], + "angle": 0, + "content": "3.2 Interactive World Simulation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.822, + 0.825, + 0.878 + ], + "angle": 0, + "content": "Before introducing the memory mechanism, we first present our interactive world simulator, which models long video sequences using an auto-regressive conditional diffusion transformer. 
Interaction is achieved by embedding external control signals, primarily actions, into the model through dedicated conditioning modules (Parker-Holder et al., 2024; Decart et al., 2024; Yu et al., 2025c)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Following prior work (Decart et al., 2024), we adopt a conditional Diffusion Transformer (DiT) (Peebles and Xie, 2023) architecture for video generation, and Diffusion Forcing (DF) (Chen et al.," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.947 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.135 + ], + "angle": 0, + "content": "2025) for autoregressive prediction. As shown in Figure 2(a), our model consists of multiple DiT blocks with spatial and temporal modules for spatiotemporal reasoning. The temporal module applies causal attention to ensure that each frame only attends to preceding frames." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.827, + 0.252 + ], + "angle": 0, + "content": "The actions are injected by first projecting them into the embedding space using a multi-layer perceptron (MLP). The resulting action embeddings are added to the denoising timestep embeddings and injected into the temporal blocks using Adaptive Layer Normalization (AdaLN) (Xu et al., 2019), following the paradigm of Bar et al. (2024); Decart et al. (2024). In our Minecraft experiments, the action space contains 25 dimensions, including movements, view adjustments, and event triggers. We also apply timestep embeddings to the spatial blocks in the same manner, although this is omitted from the figure for clarity. Standard architectural components such as residual connections, multi-head attention, and feedforward networks are also not shown." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.257, + 0.825, + 0.314 + ], + "angle": 0, + "content": "The combination of conditional DiT and DF provides a strong baseline for long-term interactive video generation. However, due to the computational cost of video synthesis, the temporal context window remains limited. As a result, content outside this window is forgotten, which leads to inconsistencies during long-term generation (Decart et al., 2024)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.331, + 0.482, + 0.346 + ], + "angle": 0, + "content": "3.3 Memory Representation and Retrieval" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.357, + 0.455, + 0.606 + ], + "angle": 0, + "content": "To address the limited context window of video generative models, we introduce a memory mechanism that enables the model to retain and retrieve information beyond the current generation window. This mechanism maintains a memory bank composed of historical frames and their associated state information: \\(\\{(\\mathbf{x}_i^m,\\mathbf{p}_i,t_i)\\}_{i = 1}^N\\) where \\(\\mathbf{x}_i^m\\) denotes a memory frame, \\(\\mathbf{p}_i\\in \\mathbb{R}^5\\) (x,y,z, pitch, yaw) is its pose, and \\(t_i\\) is the timestamp. Each tuple is referred to as a memory unit. We save \\(\\mathbf{x}_i^m\\) at the token level, which is compressed by the visual encoder but retains enough details for reconstruction. The corresponding states \\(\\{(\\mathbf{p},t)\\}\\) play a critical role not only in memory retrieval but also in enabling state-aware memory conditioning."
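The memory bank just defined is plain bookkeeping: each unit couples one frame's latent tokens with its 5D pose and its timestamp. A minimal container sketch follows; the field names and tensor shapes are illustrative assumptions, not the released implementation.

```python
from dataclasses import dataclass, field
import torch

@dataclass
class MemoryUnit:
    tokens: torch.Tensor    # token-level latent of one frame, e.g. [C, h, w]
    pose: torch.Tensor      # p_i = (x, y, z, pitch, yaw)
    timestamp: int          # t_i, the frame's time in the simulated world

@dataclass
class MemoryBank:
    units: list = field(default_factory=list)

    def add(self, tokens: torch.Tensor, pose: torch.Tensor, timestamp: int):
        # Every generated frame is stored; retrieval (Algorithm 1 below)
        # later selects only a small, relevant subset for conditioning.
        self.units.append(MemoryUnit(tokens, pose, timestamp))

    def states(self):
        # Poses and timestamps drive both retrieval and state-aware attention.
        poses = torch.stack([u.pose for u in self.units])
        times = torch.tensor([u.timestamp for u in self.units])
        return poses, times
```

At the sizes reported in the supplementary material (600 units of shape [16, 18, 32] in float32), such a bank stays around 21 MB, so storing every generated frame is cheap.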
+ }, + { + "type": "title", + "bbox": [ + 0.464, + 0.361, + 0.753, + 0.377 + ], + "angle": 0, + "content": "Algorithm 1: Memory Retrieval Algorithm" + }, + { + "type": "text", + "bbox": [ + 0.464, + 0.379, + 0.73, + 0.406 + ], + "angle": 0, + "content": "Input: Memory bank of \\(N\\) historical states \\(\\{(\\mathbf{x}_i^m,\\mathbf{p}_i,t_i)\\}_{i = 1}^N;\\)" + }, + { + "type": "text", + "bbox": [ + 0.464, + 0.406, + 0.803, + 0.418 + ], + "angle": 0, + "content": "Current state \\((\\mathbf{x}_c,\\mathbf{p}_c,t_c)\\) ; memory condition length \\(L_{M}\\)" + }, + { + "type": "text", + "bbox": [ + 0.464, + 0.418, + 0.703, + 0.43 + ], + "angle": 0, + "content": "Similarity threshold \\( tr \\); weights \\( w_{o} \\), \\( w_{t} \\)." + }, + { + "type": "text", + "bbox": [ + 0.464, + 0.43, + 0.712, + 0.442 + ], + "angle": 0, + "content": "Output: A list of selected state indices \\( S \\)" + }, + { + "type": "text", + "bbox": [ + 0.464, + 0.442, + 0.643, + 0.455 + ], + "angle": 0, + "content": "Compute Confidence Score:" + }, + { + "type": "text", + "bbox": [ + 0.464, + 0.455, + 0.805, + 0.468 + ], + "angle": 0, + "content": "Compute FOV overlap ratio o via Monte Carlo sampling." + }, + { + "type": "text", + "bbox": [ + 0.464, + 0.468, + 0.789, + 0.481 + ], + "angle": 0, + "content": "Compute time difference \\(\\mathbf{d} = \\mathrm{Concat}\\big(\\{|t_i - t_c|\\}_{i = 1}^n\\big)\\)" + }, + { + "type": "text", + "bbox": [ + 0.464, + 0.481, + 0.72, + 0.493 + ], + "angle": 0, + "content": "Compute confidence \\(\\alpha = \\mathbf{o}\\cdot w_{o} - \\mathbf{d}\\cdot w_{t}\\)" + }, + { + "type": "text", + "bbox": [ + 0.464, + 0.498, + 0.685, + 0.51 + ], + "angle": 0, + "content": "Selection with Similarity Filtering:" + }, + { + "type": "text", + "bbox": [ + 0.464, + 0.511, + 0.566, + 0.522 + ], + "angle": 0, + "content": "Initialize \\(S = \\varnothing\\)" + }, + { + "type": "text", + "bbox": [ + 0.464, + 0.523, + 0.594, + 0.536 + ], + "angle": 0, + "content": "for \\(m = 1\\) to \\(L_{M}\\) do" + }, + { + "type": "text", + "bbox": [ + 0.482, + 0.536, + 0.638, + 0.548 + ], + "angle": 0, + "content": "Select \\(i^{*}\\) with highest \\(\\alpha_{i^{*}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.482, + 0.548, + 0.578, + 0.56 + ], + "angle": 0, + "content": "Append \\(i^{*}\\) to \\(S\\)" + }, + { + "type": "text", + "bbox": [ + 0.482, + 0.56, + 0.733, + 0.574 + ], + "angle": 0, + "content": "Remove all \\(j\\) where similarity \\((i^{*},j) > tr\\)" + }, + { + "type": "text", + "bbox": [ + 0.466, + 0.576, + 0.522, + 0.587 + ], + "angle": 0, + "content": "return \\(S\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.612, + 0.827, + 0.71 + ], + "angle": 0, + "content": "Memory Retrieval. Since the number of memory frames available for conditioning is limited, an efficient strategy is required to sample memory units from the memory bank. We adopt a greedy matching algorithm based on frame-pair similarity, where similarity is defined using the field-of-view (FOV) overlap ratio and timestamp differences as confidence measures. Algorithm 1 presents our approach to memory retrieval. Although simple, this strategy proves effective in retrieving relevant information for conditioning. Moreover, the model's reasoning over memory helps maintain performance even when the retrieved content is imperfect." 
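Algorithm 1 above is a greedy loop over confidence scores with a de-duplication filter, and it translates almost line for line into code. The sketch below is illustrative only: it assumes the FOV overlap ratios o are precomputed (e.g., by the Monte Carlo routine from the supplementary material) and that sim(i, j) is some frame-pair similarity, whose exact form the text leaves open.

```python
import numpy as np

def retrieve_memory(o, times, t_c, L_M, tr, w_o, w_t, sim):
    """Greedy memory retrieval following Algorithm 1.

    o   : per-unit FOV overlap ratios with the current pose
    sim : pairwise similarity sim(i, j) between memory units, used to drop
          near-duplicates of a selected unit (pose distance is one plausible
          choice; the paper leaves the instantiation open)
    """
    d = np.abs(np.asarray(times, dtype=float) - t_c)   # time differences
    conf = np.asarray(o) * w_o - d * w_t               # confidence alpha
    selected, candidates = [], list(range(len(conf)))
    for _ in range(L_M):
        if not candidates:
            break
        best = max(candidates, key=lambda i: conf[i])  # highest-confidence unit
        selected.append(best)
        # Similarity filtering: remove near-duplicates of the chosen unit.
        candidates = [j for j in candidates if j != best and sim(best, j) <= tr]
    return selected
```

Because the retrieved units only condition the generator, an occasional suboptimal pick degrades gracefully, matching the robustness noted above.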
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.727, + 0.434, + 0.742 + ], + "angle": 0, + "content": "3.4 State-aware Memory Condition" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.753, + 0.827, + 0.85 + ], + "angle": 0, + "content": "After retrieving necessary memory units, unlike prior methods that use memory mainly for temporal smoothness (Zheng et al., 2024a) or semantic guidance (Wu et al., 2025b; Rahman et al., 2023), our goal is to explicitly reconstruct previously seen visual content – even under significant viewpoint or scene changes. This requires the model to perform spatiotemporal reasoning to extract relevant information from memory, which we model using cross-attention (Vaswani et al., 2017). Since relying solely on visual tokens can be ambiguous, we incorporate the corresponding states as cues to enable state-aware attention." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.827, + 0.913 + ], + "angle": 0, + "content": "State Embedding. State embedding provides essential spatial and temporal context for memory retrieval. To encode spatial information, we adopt Plücker embedding (Sitzmann et al., 2021) to convert 5D poses \\(\\mathbf{p} \\in \\mathbb{R}^5\\) into dense positional features \\(\\mathrm{PE}(\\mathbf{p}) \\in \\mathbb{R}^{h \\times w \\times 6}\\), following (He et al., 2024; Gao et al., 2024). Temporal context is captured via a lightweight MLP over sinusoidal embedded" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.194, + 0.096, + 0.805, + 0.509 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.519, + 0.825, + 0.59 + ], + "angle": 0, + "content": "Figure 3: Qualitative results. We showcase WORLDMEM's capabilities through two sets of examples. Top: A comparison with Ground Truth (GT). WORLDMEM accurately models diverse dynamics (e.g., rain) by conditioning on 600 past frames, ensuring temporal consistency. Bottom: Interaction with the world. Objects like hay in the desert or wheat in the plains persist over time, with wheat visibly growing. For the best experience, see the supplementary videos." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.599, + 0.54, + 0.614 + ], + "angle": 0, + "content": "\\((SE)\\) timestamps. The final embedding is (Figure 2 (c)):" + }, + { + "type": "equation", + "bbox": [ + 0.394, + 0.619, + 0.825, + 0.636 + ], + "angle": 0, + "content": "\\[\n\\mathbf {E} = G _ {p} (\\mathrm {P E} (\\mathbf {p})) + G _ {t} (\\mathrm {S E} (t)), \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.641, + 0.645, + 0.656 + ], + "angle": 0, + "content": "where \\( G_{p} \\) and \\( G_{t} \\) are MLPs mapping pose and time into a shared space." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.661, + 0.825, + 0.718 + ], + "angle": 0, + "content": "State-aware Memory Attention. To support reconstruction under viewpoint and temporal shifts, we introduce a state-aware attention mechanism that incorporates spatial-temporal cues into memory retrieval. By conditioning attention on both visual features and state information, the model achieves more accurate reasoning between input and memory." 
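To make the state conditioning concrete, here is a PyTorch sketch of the embedding of Eq. (3) together with the query/key enrichment and cross-attention spelled out in the next two equations. The Plücker construction mirrors the supplementary equations for l_uv = (c x d_uv, d_uv); the intrinsics K_inv, the MLP widths, and the use of nn.MultiheadAttention are assumptions rather than the released design.

```python
import torch
import torch.nn as nn

def plucker_embedding(K_inv, R_c, c, H, W):
    # Per-pixel Plücker coordinates l_uv = (c x d_uv, d_uv) in R^6.
    u, v = torch.meshgrid(torch.arange(W), torch.arange(H), indexing="xy")
    pix = torch.stack([u, v, torch.ones_like(u)], dim=-1).float()   # [H, W, 3]
    d = pix @ K_inv.T @ R_c.T            # ray directions R_c K^{-1} [u, v, 1]^T
    d = d / d.norm(dim=-1, keepdim=True)
    m = torch.cross(c.expand_as(d), d, dim=-1)   # moment c x d
    return torch.cat([m, d], dim=-1)             # [H, W, 6]

class StateEmbed(nn.Module):
    # E = G_p(PE(p)) + G_t(SE(t)), Eq. (3); layer widths are placeholders.
    def __init__(self, d_time, d_model):
        super().__init__()
        self.g_p = nn.Sequential(nn.Linear(6, d_model), nn.SiLU(),
                                 nn.Linear(d_model, d_model))
        self.g_t = nn.Sequential(nn.Linear(d_time, d_model), nn.SiLU(),
                                 nn.Linear(d_model, d_model))

    def forward(self, plucker_tokens, t_sinusoid):
        # plucker_tokens: [L, 6] flattened PE(p); t_sinusoid: [L, d_time] SE(t).
        return self.g_p(plucker_tokens) + self.g_t(t_sinusoid)

def memory_attention(mha, x_q, x_k, e_q, e_k):
    # Queries and keys are enriched with state embeddings; values stay X_k,
    # so the retrieved content is the raw memory feature, addressed by state.
    out, _ = mha(x_q + e_q, x_k + e_k, x_k)   # mha: nn.MultiheadAttention
    return out
```

The relative state formulation described below then amounts to normalizing poses and timestamps against the query frame before these embeddings are computed.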
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.722, + 0.825, + 0.766 + ], + "angle": 0, + "content": "Let \\(\\mathbf{X}_q\\in \\mathbb{R}^{l_q\\times d}\\) denote the flattened feature map of input frames (queries), and \\(\\mathbf{X}_k\\in \\mathbb{R}^{l_k\\times d}\\) the concatenated memory features (keys and values). We first enrich both with their corresponding state embeddings \\(\\mathbf{E}_q\\) and \\(\\mathbf{E}_k\\):" + }, + { + "type": "equation", + "bbox": [ + 0.376, + 0.766, + 0.825, + 0.784 + ], + "angle": 0, + "content": "\\[\n\\tilde {\\mathbf {X}} _ {q} = \\mathbf {X} _ {q} + \\mathbf {E} _ {q}, \\quad \\tilde {\\mathbf {X}} _ {k} = \\mathbf {X} _ {k} + \\mathbf {E} _ {k}. \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.792, + 0.767, + 0.807 + ], + "angle": 0, + "content": "Cross-attention is then applied to retrieve relevant memory content and output updated \\(\\mathbf{X}^{\\prime}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.295, + 0.813, + 0.825, + 0.831 + ], + "angle": 0, + "content": "\\[\n\\mathbf {X} ^ {\\prime} = \\operatorname {C r o s s A t t n} (Q = p _ {q} (\\tilde {\\mathbf {X}} _ {q}), K = p _ {k} (\\tilde {\\mathbf {X}} _ {k}), V = p _ {v} (\\mathbf {X} _ {k})), \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.836, + 0.48, + 0.852 + ], + "angle": 0, + "content": "where \\(p_q, p_k\\), and \\(p_v\\) are learnable projections." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.826, + 0.913 + ], + "angle": 0, + "content": "To simplify the reasoning space, we adopt a relative state formulation. For each query frame, the state is set to a zero reference (e.g., the pose is reset to the identity and the timestamp to zero), while the states of key frames are normalized to relative values. This design, illustrated in Figure 2(d), improves alignment under viewpoint changes and simplifies the learning objective." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.504, + 0.947 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.177, + 0.096, + 0.488, + 0.24 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.247, + 0.49, + 0.303 + ], + "angle": 0, + "content": "Figure 4: Within context window evaluation. The motion sequence involves turning right and returning to the original position, showing self-contained consistency." + }, + { + "type": "table_caption", + "bbox": [ + 0.174, + 0.307, + 0.396, + 0.321 + ], + "angle": 0, + "content": "Table 1: Evaluation on Minecraft" + }, + { + "type": "table", + "bbox": [ + 0.155, + 0.33, + 0.428, + 0.499 + ], + "angle": 0, + "content": "
Within context window</td><td></td><td></td><td></td></tr><tr><td>
MethodsPSNR ↑LPIPS ↓rFID ↓
Full Seq.14.350.069113.87
DF20.560.009413.88
Ours21.010.007213.73
Beyond context window</td><td></td><td></td><td></td></tr><tr><td>
MethodsPSNR ↑LPIPS ↓rFID ↓
Full Seq.///
DF18.040.437651.28
Ours19.320.142915.37
" + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.094, + 0.821, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.247, + 0.828, + 0.304 + ], + "angle": 0, + "content": "Figure 5: Beyond context window evaluation. Diffusion-Forcing suffers inconsistency over time, while ours maintains quality and recovers past scenes." + }, + { + "type": "table_caption", + "bbox": [ + 0.465, + 0.307, + 0.735, + 0.323 + ], + "angle": 0, + "content": "Table 2: Ablation on embedding designs" + }, + { + "type": "table", + "bbox": [ + 0.449, + 0.33, + 0.821, + 0.398 + ], + "angle": 0, + "content": "
Pose type | Embed. type | PSNR ↑ | LPIPS ↓ | rFID ↓
Sparse | Absolute | 14.67 | 0.2887 | 39.23
Dense | Absolute | 17.63 | 0.1830 | 29.34
Dense | Relative | 19.32 | 0.1429 | 15.37
" + }, + { + "type": "table_caption", + "bbox": [ + 0.448, + 0.409, + 0.751, + 0.424 + ], + "angle": 0, + "content": "Table 3: Ablation on memory retrieve strategy" + }, + { + "type": "table", + "bbox": [ + 0.449, + 0.431, + 0.784, + 0.498 + ], + "angle": 0, + "content": "
Strategy | PSNR ↑ | LPIPS ↓ | rFID ↓
Random | 12.32 | 0.3224 | 47.35
+ Confidence Filter | 17.12 | 0.1863 | 24.33
+ Similarity Filter | 19.32 | 0.1429 | 15.37
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.524, + 0.825, + 0.595 + ], + "angle": 0, + "content": "Incorporating memory into the pipeline. We incorporate memory frames into the pipeline by treating them as clean inputs during both training and inference. As shown in Figure 2 (a-b), during training, memory frames are assigned the lowest noise level \( k_{\mathrm{min}} \), while context window frames receive independently sampled noise levels from the range \( [k_{\mathrm{min}}, k_{\mathrm{max}}] \). During inference, both memory and context frames are assigned \( k_{\mathrm{min}} \), while the current generating frames are assigned \( k_{\mathrm{max}} \)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.6, + 0.766, + 0.617 + ], + "angle": 0, + "content": "To restrict memory influence only to memory blocks, we apply a temporal attention mask:" + }, + { + "type": "equation", + "bbox": [ + 0.367, + 0.623, + 0.826, + 0.673 + ], + "angle": 0, + "content": "\[\nA_{\text{mask}}(i, j) = \left\{ \begin{array}{ll} 1, & i \leq L_{M} \text{ and } j = i \\ 1, & i > L_{M} \text{ and } j \leq i \\ 0, & \text{otherwise} \end{array} \right. \tag{6}\n\]" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.681, + 0.825, + 0.71 + ], + "angle": 0, + "content": "where \( L_{M} \) is the number of memory frames that are appended before frames within the context window. This guarantees causal attention while preventing memory units from affecting each other." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.729, + 0.314, + 0.746 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.759, + 0.825, + 0.817 + ], + "angle": 0, + "content": "Datasets. We use MineDojo (Fan et al., 2022) to create diverse training and evaluation datasets in Minecraft, configuring diverse environments (e.g., plains, savannas, ice plains, and deserts), agent actions, and interactions. For real-world scenes, we utilize RealEstate10K (Zhou et al., 2018) with camera pose annotations to evaluate long-term world consistency." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.821, + 0.827, + 0.879 + ], + "angle": 0, + "content": "Metrics. For quantitative evaluation, we employ reconstruction metrics, where the method of obtaining ground truth (GT) varies by specific settings. We then assess the consistency and quality of the generated videos using PSNR, LPIPS (Zhang et al., 2018), and reconstruction FID (rFID) (Heusel et al., 2017), which collectively measure pixel-level fidelity, perceptual similarity, and overall realism." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.883, + 0.826, + 0.913 + ], + "angle": 0, + "content": "Experimental details. For our experiments on Minecraft (Fan et al., 2022), we utilize Oasis (Decart et al., 2024) as the base model. Our model is trained using the Adam optimizer with a fixed" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.182, + 0.092, + 0.495, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.504, + 0.093, + 0.811, + 0.25 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.253, + 0.825, + 0.296 + ], + "angle": 0, + "content": "Figure 6: Results on RealEstate (Zhou et al., 2018). We visualize loop closure consistency over a full camera rotation.
The visual similarity between the first and last frames serves as a qualitative indicator of 3D spatial consistency." + }, + { + "type": "table_caption", + "bbox": [ + 0.37, + 0.309, + 0.625, + 0.322 + ], + "angle": 0, + "content": "Table 4: Evaluation on RealEstate10K" + }, + { + "type": "table", + "bbox": [ + 0.301, + 0.323, + 0.694, + 0.416 + ], + "angle": 0, + "content": "
Methods | PSNR ↑ | LPIPS ↓ | rFID ↓
CameraCtrl (He et al., 2024) | 13.19 | 0.3328 | 133.81
TrajAttn (Xiao et al., 2024) | 14.22 | 0.3698 | 128.36
Viewcrafter (Yu et al., 2024c) | 21.72 | 0.1729 | 58.43
DFoT (Song et al., 2025) | 16.42 | 0.2933 | 110.34
Ours | 23.34 | 0.1672 | 43.14
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.426, + 0.827, + 0.538 + ], + "angle": 0, + "content": "learning rate of \\(2 \\times 10^{-5}\\). Training is conducted at a resolution of \\(640 \\times 360\\), where frames are first encoded into a latent space via a VAE at a resolution of \\(32 \\times 18\\), then further patchified to \\(16 \\times 9\\). Our training dataset comprises approximately 12K long videos, each containing 1500 frames, generated from Fan et al. (2022). During training, we employ an 8-frame temporal context window alongside an 8-frame memory window. The model is trained for approximately 500K steps using 4 GPUs, with a batch size of 4 per GPU. For the hyperparameters specified in Algorithm 1 of the main paper, we set the similarity threshold \\(tr\\) to 0.9, \\(w_{o}\\) to 1, and \\(w_{t}\\) to \\(0.2 / t_{c}\\). For the noise levels in Eq. (5) and Eq. (6), we set \\(k_{\\min}\\) to 15 and \\(k_{\\max}\\) to 1000." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.543, + 0.825, + 0.601 + ], + "angle": 0, + "content": "For our experiments on RealEstate10K (Zhou et al., 2018), we adopt DFoT (Song et al., 2025) as the base model. The RealEstate10K dataset provides a training set of approximately 65K short video clips. Training is conducted at a resolution of \\(256 \\times 256\\), with frames patched to \\(128 \\times 128\\). The model is trained for approximately 50K steps using 4 GPUs, with a batch size of 8 per GPU." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.617, + 0.455, + 0.631 + ], + "angle": 0, + "content": "4.1 Results on Generation Benchmark" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.642, + 0.827, + 0.795 + ], + "angle": 0, + "content": "Comparisons on Minecraft Benchmark. We compare our approach with a standard full-sequence (Full Seq.) training method (He et al., 2024; Wang et al., 2024) and Diffusion Forcing (DF) (Chen et al., 2025). The key differences are as follows: the full-sequence conditional diffusion transformer (Peebles and Xie, 2023) maintains the same noise level during training and inference, DF introduces different noise levels for training and inference, and our method incorporates a memory mechanism. To assess both short-term and long-term world consistency, we conduct evaluations within and beyond the context window. We evaluate both settings on 300 test videos. In the following experiments, the agent's poses are generated by the game simulator as ground truth. However, in real-world scenarios, only the action input is available, and the pose is not directly observable. In such cases, the next-frame pose can be predicted based on the previous scenes, past states, and the upcoming action. We explore this design choice in the supplementary material." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.801, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Within context window. For this experiment, all methods use a context window of 16, while our approach additionally maintains a memory window of 8. We test on customized motion scenarios (e.g., turn left, then turn right or move forward, then backward) to assess self-contained consistency, where the ground truth consists of previously generated frames at the same positions. As shown in Table 1 and Figure 4, the full-sequence baseline suffers from inconsistencies even within its own context window. DF improves consistency by enabling greater information exchange among generated frames. 
Our memory-based approach achieves the best performance, demonstrating the effectiveness of integrating a dedicated memory mechanism." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.33, + 0.098, + 0.669, + 0.113 + ], + "angle": 0, + "content": "Table 5: Ablation on sampling strategy for training" + }, + { + "type": "table", + "bbox": [ + 0.322, + 0.113, + 0.673, + 0.184 + ], + "angle": 0, + "content": "
Sampling strategy | PSNR ↑ | LPIPS ↓ | rFID ↓
Small-range | 13.23 | 0.3786 | 46.55
Large-range | 15.11 | 0.3855 | 42.96
Progressive | 19.32 | 0.1429 | 15.37
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.191, + 0.827, + 0.304 + ], + "angle": 0, + "content": "Beyond context window. In this setting, all methods use a context window of 8 and generate 100 future frames; our method further employs a memory window of 8 while initializing a 600-frame memory bank. We compute the reconstruction error using the subsequent 100 ground truth frames after 600 frames. Full-sequence methods cannot roll out that long, so we exclude them. DF exhibits poor PSNR and LPIPS scores, indicating severe inconsistency with the ground truth beyond the context window. Additionally, its high rFID suggests notable quality degradation. In contrast, our memory-augmented approach consistently outperforms others across all metrics, demonstrating superior long-term consistency and quality preservation. Figure 5 further substantiates these findings." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.308, + 0.825, + 0.392 + ], + "angle": 0, + "content": "Figure 3 showcases WORLDMEM's capabilities. The top section demonstrates its ability to operate in a free action space across diverse environments. Given a 600-frame memory bank, our model generates 100 future frames while preserving the ground truth's actions and poses, ensuring strong world consistency. The bottom section highlights dynamic environment interaction. By using timestamps as embeddings, the model remembers environmental changes and captures natural event evolution, such as plant growth over time." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.398, + 0.827, + 0.468 + ], + "angle": 0, + "content": "Comparisons on Real Scenarios. We compare our method with prior works (He et al., 2024; Xiao et al., 2024; Yu et al., 2024c; Song et al., 2025) on the RealEstate10K dataset (Zhou et al., 2018). We design 5 evaluation trajectories, each starting and ending at the same pose, across 100 scenes. The trajectory lengths range from 37 to 60 frames – exceeding the training lengths of all baselines (maximum 25 frames)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.473, + 0.825, + 0.572 + ], + "angle": 0, + "content": "CameraCtrl (He et al., 2024), TrajAttn (Xiao et al., 2024), and DFoT (Song et al., 2025) discard past frames and suffer from inconsistency. Viewcrafter (Yu et al., 2024c) incorporates explicit 3D reconstruction, yielding better results, but is constrained by errors in post-processing such as reconstruction and rendering. As shown in Table 4 and Figure 6, our approach achieves superior performance across all metrics. However, the RealEstate dataset inherently limits the full potential of our method, as it consists of short, non-interactive clips with limited temporal complexity. We leave evaluation under more challenging and interactive real-world scenarios for future work." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.589, + 0.275, + 0.603 + ], + "angle": 0, + "content": "4.2 Ablation" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.615, + 0.825, + 0.727 + ], + "angle": 0, + "content": "**Embedding designs.** The design of embeddings within the memory block is crucial for cross-frame relationship modeling. We evaluate three strategies (Table 2): (1) sparse pose embedding with absolute encoding, (2) dense pose embedding with absolute encoding, and (3) dense pose embedding with relative encoding. Results show that dense pose embeddings (Plücker embedding) significantly enhance all metrics, emphasizing the benefits of richer pose representations.
Switching from absolute to relative encoding further improves performance, particularly in LPIPS and rFID, by facilitating relationship reasoning and information retrieval. As illustrated in Figure 7, absolute embeddings accumulate errors over time, while relative embeddings maintain stability even beyond 300 frames." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.732, + 0.825, + 0.803 + ], + "angle": 0, + "content": "Sampling strategy for training. We compare different sampling strategies during training in the Minecraft benchmark. Small-range sampling restricts memory conditioning to frames within \\(2\\mathrm{m}\\) in the Minecraft world, while large-range sampling extends this range to \\(8\\mathrm{m}\\). Progressive sampling, on the other hand, begins with small-range samples for initial training steps and then gradually expands to large-range samples." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.808, + 0.825, + 0.865 + ], + "angle": 0, + "content": "As shown in Table 5, both small-range and large-range sampling struggle with consistency and quality, whereas progressive sampling significantly improves all metrics. This suggests that gradually increasing difficulty during training helps the model learn to reason and effectively query information from memory blocks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Time condition. We ablate the effectiveness of the timestamp condition (for both embedding and retrieval) in Table 6. We curate 100 video samples featuring placing events and evaluate whether future generations align with event progression. As shown in the table, incorporating the time" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.182, + 0.094, + 0.48, + 0.236 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.241, + 0.492, + 0.394 + ], + "angle": 0, + "content": "Figure 7: Long-term Generation Comparison. This figure presents the PSNR of different ablation methods compared to the ground truth over a 300-frame sequence. The results show that our method without memory blocks or using random memory retrieval exhibits immediate inconsistencies with the ground truth. Additionally, the model lacking relative embeddings begins to degrade significantly beyond 100 frames. In contrast, our full method maintains strong consistency even beyond 300 frames." + }, + { + "type": "image", + "bbox": [ + 0.513, + 0.101, + 0.822, + 0.178 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.185, + 0.827, + 0.269 + ], + "angle": 0, + "content": "Figure 8: Results w/o and w/ time condition. Without timestamps, the model fails to differentiate memory units from the same location at different times, causing errors. With time conditioning, it aligns with the updated world state, ensuring consistency." + }, + { + "type": "table_caption", + "bbox": [ + 0.547, + 0.307, + 0.786, + 0.321 + ], + "angle": 0, + "content": "Table 6: Ablation on time condition" + }, + { + "type": "table", + "bbox": [ + 0.513, + 0.329, + 0.822, + 0.384 + ], + "angle": 0, + "content": "
Time condition | PSNR ↑ | LPIPS ↓ | rFID ↓
w/o | 17.17 | 0.1989 | 23.89
w/ | 19.12 | 0.1613 | 16.53
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.412, + 0.825, + 0.469 + ], + "angle": 0, + "content": "condition significantly improves PSNR and LPIPS, indicating that adding temporal information helps the model faithfully reproduce event changes in world simulation. Since events like plant growth are inherently unpredictable, we do not conduct quantitative evaluations on such cases but instead provide qualitative illustrations in Figure 8." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.475, + 0.825, + 0.558 + ], + "angle": 0, + "content": "Memory retrieve strategy. We analyze memory retrieval strategies in Table 3. Random sampling from the memory bank leads to poor performance and severe quality degradation, as evidenced by a sharp drop in rFID and rapid divergence from the ground truth (Figure 7). The confidence-based filtering significantly enhances consistency and generation quality. Additionally, we refine retrieval by filtering out redundant memory units based on similarity, further improving all evaluation metrics and demonstrating the effectiveness of our approach." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.581, + 0.457, + 0.596 + ], + "angle": 0, + "content": "5 Limitations and Future Works" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.614, + 0.825, + 0.712 + ], + "angle": 0, + "content": "Despite the effectiveness of our approach, certain issues warrant further exploration. First, we cannot guarantee that we can always retrieve all necessary information from the memory bank. In some corner cases (e.g., when views are blocked by obstacles), relying solely on view overlap may be insufficient. Second, our current interaction with the environment lacks diversity and realism. In future work, we plan to extend our models to real-world scenarios with more realistic and varied interactions. Lastly, our memory design still entails linearly increasing memory usage, which may impose limitations when handling extremely long sequences." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.734, + 0.3, + 0.75 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.767, + 0.825, + 0.865 + ], + "angle": 0, + "content": "In conclusion, WORLDMEM tackles the longstanding challenge of maintaining long-term consistency in world simulation by employing a memory bank of past frames and associated states. Its memory attention mechanism enables accurate reconstruction of previously observed scenes, even under large viewpoint or temporal gaps, and effectively models dynamic changes over time. Extensive experiments in both virtual and real settings confirm WORLDMEM's capacity for robust, immersive world simulation. We hope our work will encourage further research on the design and applications of memory-based world simulators." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.87, + 0.827, + 0.914 + ], + "angle": 0, + "content": "Acknowledgements. This research is supported by the National Research Foundation, Singapore, under its NRF Fellowship Award. This research is also supported by NTU SUG-NAP, as well as cash and in-kind funding from NTU S-Lab and industry partner(s)."
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.174, + 0.09, + 0.27, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.113, + 0.829, + 0.153 + ], + "angle": 0, + "content": "Eloi Alonso, Adam Jelley, Vincent Micheli, Anssi Kanervisto, Amos J Storkey, Tim Pearce, and François Fleuret. Diffusion for world modeling: Visual details matter in atari. Advances in Neural Information Processing Systems, 37:58757-58791, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.161, + 0.794, + 0.177 + ], + "angle": 0, + "content": "Amir Bar, Gaoyue Zhou, Danny Tran, Trevor Darrell, and Yann LeCun. Navigation world models, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.185, + 0.827, + 0.223 + ], + "angle": 0, + "content": "Charles Beattie, Joel Z Leibo, Denis Teplyashin, Tom Ward, Marcus Wainwright, Heinrich Kuttler, Andrew Lefrancq, Simon Green, Víctor Valdés, Amir Sadik, et al. Deepmind lab. arXiv preprint arXiv:1612.03801, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.233, + 0.825, + 0.272 + ], + "angle": 0, + "content": "Boyuan Chen, Diego Martí Monsó, Yilun Du, Max Simchowitz, Russ Tedrake, and Vincent Sitzmann. Diffusion forcing: Next-token prediction meets full-sequence diffusion. Advances in Neural Information Processing Systems, 37:24081-24125, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.282, + 0.827, + 0.321 + ], + "angle": 0, + "content": "Haoxin Chen, Menghan Xia, Yingqing He, Yong Zhang, Xiaodong Cun, Shaoshu Yang, Jinbo Xing, Yaofang Liu, Qifeng Chen, Xintao Wang, et al. Videocraft1: Open diffusion models for high-quality video generation. arXiv preprint arXiv:2310.19512, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.33, + 0.824, + 0.357 + ], + "angle": 0, + "content": "Decart, Julian Quevedo, Quinn McIntyre, Spruce Campbell, Xinlei Chen, and Robert Wachen. Oasis: A universe in a transformer. 2024. Project website." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.366, + 0.827, + 0.405 + ], + "angle": 0, + "content": "Linxi Fan, Guanzhi Wang, Yunfan Jiang, Ajay Mandlekar, Yuncong Yang, Haoyi Zhu, Andrew Tang, DeAn Huang, Yuke Zhu, and Anima Anandkumar. Minedojo: Building open-ended embodied agents with internet-scale knowledge. Advances in Neural Information Processing Systems, 35:18343-18362, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.415, + 0.826, + 0.454 + ], + "angle": 0, + "content": "Ruili Feng, Han Zhang, Zhantao Yang, Jie Xiao, Zhilei Shu, Zhiheng Liu, Andy Zheng, Yukun Huang, Yu Liu, and Hongyang Zhang. The matrix: Infinite-horizon world generation with real-time moving control. arXiv preprint arXiv:2412.03568, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.463, + 0.826, + 0.503 + ], + "angle": 0, + "content": "Ruiqi Gao, Aleksander Holynski, Philipp Henzler, Arthur Brussee, Ricardo Martin-Brualla, Pratul Srinivasan, Jonathan T Barron, and Ben Poole. Cat3d: Create anything in 3d with multi-view diffusion models. arXiv preprint arXiv:2405.10314, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.512, + 0.827, + 0.551 + ], + "angle": 0, + "content": "Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning. 
arXiv preprint arXiv:2307.04725, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.56, + 0.825, + 0.588 + ], + "angle": 0, + "content": "David Ha and Jürgen Schmidhuber. Recurrent world models facilitate policy evolution. Advances in neural information processing systems, 31, 2018a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.597, + 0.724, + 0.611 + ], + "angle": 0, + "content": "David Ha and Jürgen Schmidhuber. World models. arXiv preprint arXiv:1803.10122, 2018b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.62, + 0.825, + 0.646 + ], + "angle": 0, + "content": "Danijar Hafner, Timothy Lillicrap, Jimmy Ba, and Mohammad Norouzi. Dream to control: Learning behaviors by latent imagination. arXiv preprint arXiv:1912.01603, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.656, + 0.824, + 0.682 + ], + "angle": 0, + "content": "Danijar Hafner, Timothy Lillicrap, Mohammad Norouzi, and Jimmy Ba. Mastering atari with discrete world models. arXiv preprint arXiv:2010.02193, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.691, + 0.826, + 0.718 + ], + "angle": 0, + "content": "Hao He, Yinghao Xu, Yuwei Guo, Gordon Wetzstein, Bo Dai, Hongsheng Li, and Ceyuan Yang. Cameractrol: Enabling camera control for text-to-video generation. arXiv preprint arXiv:2404.02101, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.727, + 0.825, + 0.766 + ], + "angle": 0, + "content": "Roberto Henschel, Levon Khachatryan, Daniil Hayrapetyan, Hayk Poghosyan, Vahram Tadevosyan, Zhangyang Wang, Shant Navasardyan, and Humphrey Shi. Streamingt2v: Consistent, dynamic, and extendable long video generation from text. arXiv preprint arXiv:2403.14773, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.776, + 0.825, + 0.815 + ], + "angle": 0, + "content": "Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.824, + 0.825, + 0.863 + ], + "angle": 0, + "content": "Yining Hong, Beide Liu, Maxine Wu, Yuanhao Zhai, Kai-Wei Chang, Linjie Li, Kevin Lin, Chung-Ching Lin, Jianfeng Wang, Zhengyuan Yang, Ying Nian Wu, and Lijuan Wang Wang. Slowfast-vgen: Slow-fast learning for action-driven long video generation. arXiv preprint arXiv:2410.23277, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.873, + 0.827, + 0.911 + ], + "angle": 0, + "content": "Anthony Hu, Lloyd Russell, Hudson Yeo, Zak Murez, George Fedoseev, Alex Kendall, Jamie Shotton, and Gianluca Corrado. Gaia-1: A generative world model for autonomous driving. arXiv preprint arXiv:2309.17080, 2023." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.113, + 0.829, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.12 + ], + "angle": 0, + "content": "Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuzhhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. *ICLR*, 1(2):3, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.129, + 0.827, + 0.168 + ], + "angle": 0, + "content": "Hanwen Jiang, Hao Tan, Peng Wang, Haian Jin, Yue Zhao, Sai Bi, Kai Zhang, Fujun Luan, Kalyan Sunkavalli, Qixing Huang, et al. Rayzer: A self-supervised large view synthesis model. arXiv preprint arXiv:2505.00702, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.179, + 0.826, + 0.218 + ], + "angle": 0, + "content": "Yang Jin, Zhicheng Sun, Ningyuan Li, Kun Xu, Hao Jiang, Nan Zhuang, Quzhe Huang, Yang Song, Yadong Mu, and Zhouchen Lin. Pyramidal flow matching for efficient video generative modeling. arXiv preprint arXiv:2410.05954, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.228, + 0.826, + 0.267 + ], + "angle": 0, + "content": "Jihwan Kim, Junoh Kang, Jinyoung Choi, and Bohyung Han. FIFO-diffusion: Generating infinite videos from text without training. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.278, + 0.826, + 0.355 + ], + "angle": 0, + "content": "Dan Kondratyuk, Lijun Yu, Xiuye Gu, José Lezama, Jonathan Huang, Grant Schindler, Rachel Hornung, Vighnesh Birodkar, Jimmy Yan, Ming-Chang Chiu, Krishna Somandepalli, Hassan Akbari, Yair Alon, Yong Cheng, Josh Dillon, Agrim Gupta, Meera Hahn, Anja Hauth, David Hendon, Alonso Martinez, David Minnen, Mikhail Sirotenko, Kihyuk Sohn, Xuan Yang, Hartwig Adam, Ming-Hsuan Yang, Irfan Essa, Huisheng Wang, David A. Ross, Bryan Seybold, and Lu Jiang. Videopoet: A large language model for zero-shot video generation, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.366, + 0.826, + 0.405 + ], + "angle": 0, + "content": "Hanwen Liang, Junli Cao, Vidit Goel, Guocheng Qian, Sergei Korolev, Demetri Terzopoulos, Konstantinos N Plataniotis, Sergey Tulyakov, and Jian Ren. Wonderland: Navigating 3d scenes from a single image. arXiv preprint arXiv:2412.12091, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.415, + 0.826, + 0.453 + ], + "angle": 0, + "content": "Fangfu Liu, Wenqiang Sun, Hanyang Wang, Yikai Wang, Haowen Sun, Junliang Ye, Jun Zhang, and Yueqi Duan. Reconx: Reconstruct any scene from sparse views with video diffusion model. arXiv preprint arXiv:2408.16767, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.464, + 0.826, + 0.491 + ], + "angle": 0, + "content": "Xin Ma, Yaohui Wang, Gengyun Jia, Xinyuan Chen, Ziwei Liu, Yuan-Fang Li, Cunjian Chen, and Yu Qiao. Latte: Latent diffusion transformer for video generation. arXiv preprint arXiv:2401.03048, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.501, + 0.826, + 0.528 + ], + "angle": 0, + "content": "OpenAI. Video generation models as world simulators. https://openai.com/research/video-generation-models-as-world-simulators, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.538, + 0.826, + 0.614 + ], + "angle": 0, + "content": "Jack Parker-Holder, Philip Ball, Jake Bruce, Vibhavari Dasagi, Kristian Holsheimer, Christos Kaplanis, Alexandre Moufarek, Guy Scully, Jeremy Shar, Jimmy Shi, Stephen Spencer, Jessica Yung, Michael Dennis, Sultan Kenjeyev, Shangbang Long, Vlad Mnih, Harris Chan, Maxime Gazeau, Bonnie Li, Fabio Pardo, Luyu Wang, Lei Zhang, Frederic Besse, Tim Harley, Anna Mitenkova, Jane Wang, Jeff Clune, Demis Hassabis, Raia Hadsell, Adrian Bolton, Satinder Singh, and Tim Roktaschel. Genie 2: A large-scale foundation world model. 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.625, + 0.826, + 0.653 + ], + "angle": 0, + "content": "William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4195-4205, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.662, + 0.826, + 0.702 + ], + "angle": 0, + "content": "Tanzila Rahman, Hsin-Ying Lee, Jian Ren, Sergey Tulyakov, Shweta Mahajan, and Leonid Sigal. Make-a-story: Visual memory conditioned consistent story generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2493-2502, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.712, + 0.826, + 0.763 + ], + "angle": 0, + "content": "Xuanchi Ren, Tianchang Shen, Jiahui Huang, Huan Ling, Yifan Lu, Merlin Nimier-David, Thomas Müller, Alexander Keller, Sanja Fidler, and Jun Gao. Gen3c: 3d-informed world-consistent video generation with precise camera control. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.773, + 0.826, + 0.826 + ], + "angle": 0, + "content": "Mehdi SM Sajjadi, Henning Meyer, Etienne Pot, Urs Bergmann, Klaus Greff, Noha Radwan, Suhani Vora, Mario Lucic, Daniel Duckworth, Alexey Dosovitskiy, et al. Scene representation transformer: Geometry-free novel view synthesis through set-latent scene representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6229-6238, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.836, + 0.826, + 0.875 + ], + "angle": 0, + "content": "Vincent Sitzmann, Semon Rezchikov, Bill Freeman, Josh Tenenbaum, and Fredo Durand. Light field networks: Neural scene representations with single-evaluation rendering. Advances in Neural Information Processing Systems, 34:19313-19325, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.885, + 0.826, + 0.912 + ], + "angle": 0, + "content": "Kiwhan Song, Boyuan Chen, Max Simchowitz, Yilun Du, Russ Tedrake, and Vincent Sitzmann. History-guided video diffusion. arXiv preprint arXiv:2502.06764, 2025." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.092, + 0.827, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.829, + 0.131 + ], + "angle": 0, + "content": "Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.141, + 0.826, + 0.169 + ], + "angle": 0, + "content": "Dani Valevski, Yaniv Leviathan, Moab Arar, and Shlomi Fruchter. Diffusion models are real-time game engines. arXiv preprint arXiv:2408.14837, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.178, + 0.827, + 0.205 + ], + "angle": 0, + "content": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.214, + 0.826, + 0.241 + ], + "angle": 0, + "content": "Hengyi Wang and Lourdes Agapito. 3d reconstruction with spatial memory. arXiv preprint arXiv:2408.16061, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.25, + 0.824, + 0.278 + ], + "angle": 0, + "content": "Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, and Shiwei Zhang. Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571, 2023a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.287, + 0.825, + 0.326 + ], + "angle": 0, + "content": "Jing Wang, Fengzhuo Zhang, Xiaoli Li, Vincent YF Tan, Tianyu Pang, Chao Du, Aixin Sun, and Zhuoran Yang. Error analyses of auto-regressive video diffusion models: A unified framework. arXiv preprint arXiv:2503.10704, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.336, + 0.825, + 0.375 + ], + "angle": 0, + "content": "Yaohui Wang, Xinyuan Chen, Xin Ma, Shangchen Zhou, Ziqi Huang, Yi Wang, Ceyuan Yang, Yinan He, Jiashuo Yu, Peiqing Yang, et al. Lavie: High-quality video generation with cascaded latent diffusion models. arXiv preprint arXiv:2309.15103, 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.385, + 0.825, + 0.425 + ], + "angle": 0, + "content": "Zhouxia Wang, Ziyang Yuan, Xintao Wang, Yaowei Li, Tianshui Chen, Menghan Xia, Ping Luo, and Ying Shan. Motionctrl: A unified and flexible motion controller for video generation. In ACM SIGGRAPH 2024 Conference Papers, pages 1-11, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.434, + 0.824, + 0.462 + ], + "angle": 0, + "content": "Sibo Wu, Congrong Xu, Binbin Huang, Andreas Geiger, and Anpei Chen. Genfusion: Closing the loop between reconstruction and generation via videos. arXiv preprint arXiv:2503.21219, 2025a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.471, + 0.826, + 0.509 + ], + "angle": 0, + "content": "Tong Wu, Zhihao Fan, Xiao Liu, Yeyun Gong, Yelong Shen, Jian Jiao, Hai-Tao Zheng, Juntao Li, Zhongyu Wei, Jian Guo, Nan Duan, and Weizhu Chen. Ar-diffusion: Auto-regressive diffusion model for text generation, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.52, + 0.824, + 0.56 + ], + "angle": 0, + "content": "Xindi Wu, Uriel Singer, Zhaojiang Lin, Andrea Madotto, Xide Xia, Yifan Xu, Paul Crook, Xin Luna Dong, and Seungwhan Moon. Corgi: Cached memory guided video generation. In 2025 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 4585-4594. IEEE, 2025b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.569, + 0.824, + 0.596 + ], + "angle": 0, + "content": "Zeqi Xiao, Wenqi Ouyang, Yifan Zhou, Shuai Yang, Lei Yang, Jianlou Si, and Xingang Pan. Trajectory attention for fine-grained video motion control. arXiv preprint arXiv:2411.19324, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.605, + 0.825, + 0.633 + ], + "angle": 0, + "content": "Jingjing Xu, Xu Sun, Zhiyuan Zhang, Guangxiang Zhao, and Junyang Lin. Understanding and improving layer normalization. Advances in neural information processing systems, 32, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.642, + 0.826, + 0.669 + ], + "angle": 0, + "content": "Mengjiao Yang, Yilun Du, Kamyar Ghasemipour, Jonathan Tompson, Dale Schuurmans, and Pieter Abbeel. Learning interactive real-world simulators. arXiv preprint arXiv:2310.06114, 1(2):6, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.678, + 0.826, + 0.706 + ], + "angle": 0, + "content": "Tianwei Yin, Qiang Zhang, Richard Zhang, William T Freeman, Fredo Durand, Eli Shechtman, and Xun Huang. From slow bidirectional to fast causal video generators. arXiv preprint arXiv:2412.07772, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.714, + 0.824, + 0.742 + ], + "angle": 0, + "content": "Hong-Xing Yu, Haoyi Duan, Charles Herrmann, William T Freeman, and Jiajun Wu. Wonderworld: Interactive 3d scene generation from a single image. arXiv preprint arXiv:2406.09394, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.75, + 0.826, + 0.801 + ], + "angle": 0, + "content": "Hong-Xing Yu, Haoyi Duan, Junhwa Hur, Kyle Sargent, Michael Rubinstein, William T Freeman, Forrester Cole, Deqing Sun, Noah Snavely, Jiajun Wu, et al. Wonderjourney: Going from anywhere to everywhere. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6658-6667, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.812, + 0.826, + 0.84 + ], + "angle": 0, + "content": "Jiwen Yu, Yiran Qin, Haoxuan Che, Quande Liu, Xintao Wang, Pengfei Wan, Di Zhang, Kun Gai, Hao Chen, and Xihui Liu. A survey of interactive generative video. arXiv preprint arXiv:2504.21853, 2025a." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.849, + 0.826, + 0.876 + ], + "angle": 0, + "content": "Jiwen Yu, Yiran Qin, Haoxuan Che, Quande Liu, Xintao Wang, Pengfei Wan, Di Zhang, and Xihui Liu. Position: Interactive generative video as next-generation game engine. arXiv preprint arXiv:2503.17359, 2025b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.885, + 0.824, + 0.912 + ], + "angle": 0, + "content": "Jiwen Yu, Yiran Qin, Xintao Wang, Pengfei Wan, Di Zhang, and Xihui Liu. Gamefactory: Creating new games with generative interactive videos. arXiv preprint arXiv:2501.08325, 2025c." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.092, + 0.829, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.131 + ], + "angle": 0, + "content": "Wangbo Yu, Jinbo Xing, Li Yuan, Wenbo Hu, Xiaoyu Li, Zhipeng Huang, Xiangjun Gao, Tien-Tsin Wong, Ying Shan, and Yonghong Tian. Viewcrafter: Taming video diffusion models for high-fidelity novel view synthesis. arXiv preprint arXiv:2409.02048, 2024c." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.14, + 0.826, + 0.18 + ], + "angle": 0, + "content": "Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.188, + 0.826, + 0.226 + ], + "angle": 0, + "content": "Longtao Zheng, Yifan Zhang, Hanzhong Guo, Jiachun Pan, Zhenxiong Tan, Jiahao Lu, Chuanxin Tang, Bo An, and Shuicheng Yan. Memo: Memory-guided diffusion for expressive talking video generation. arXiv preprint arXiv:2412.04448, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.235, + 0.826, + 0.263 + ], + "angle": 0, + "content": "Zangwei Zheng, Xiangyu Peng, Tianji Yang, Chenhui Shen, Shenggui Li, Hongxin Liu, Yukun Zhou, Tianyi Li, and Yang You. Open-sora: Democratizing efficient video production for all, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.271, + 0.826, + 0.298 + ], + "angle": 0, + "content": "Tinghui Zhou, Richard Tucker, John Flynn, Graham Fyffe, and Noah Snavely. Stereo magnification: Learning view synthesis using multiplane images. In SIGGRAPH, 2018." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.298 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.09, + 0.42, + 0.108 + ], + "angle": 0, + "content": "7 Supplementary Materials" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.121, + 0.386, + 0.137 + ], + "angle": 0, + "content": "7.1 Details and Experiments" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.147, + 0.828, + 0.19 + ], + "angle": 0, + "content": "**Embedding designs.** We present the detailed designs of embeddings for timesteps, actions, poses, and timestamps in Figure 10, where \\( F, C, H, W, A \\) denote the frame number, channel count, height, width, and action count, respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.195, + 0.825, + 0.223 + ], + "angle": 0, + "content": "The input pose is parameterized by position \\((x,z,y)\\) and orientation (pitch \\(\\theta\\) and yaw \\(\\phi\\)). The extrinsic matrix \\(\\mathbf{T} \\in \\mathbb{R}^{4 \\times 4}\\) is formed as:" + }, + { + "type": "equation", + "bbox": [ + 0.442, + 0.228, + 0.826, + 0.263 + ], + "angle": 0, + "content": "\\[\n\\mathbf {T} = \\left[ \\begin{array}{l l} \\mathbf {R} _ {c} & \\mathbf {c} \\\\ \\mathbf {0} ^ {T} & 1 \\end{array} \\right], \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.27, + 0.481, + 0.288 + ], + "angle": 0, + "content": "where \\(\\mathbf{c} = (x,z,y)^T\\) and \\(\\mathbf{R}_c = \\mathbf{R}_y(\\phi)\\mathbf{R}_x(\\theta)\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.292, + 0.825, + 0.319 + ], + "angle": 0, + "content": "To encode camera pose, we adopt the Plücker embedding. Given a pixel \\((u,v)\\) with normalized camera coordinates:" + }, + { + "type": "equation", + "bbox": [ + 0.423, + 0.319, + 0.826, + 0.337 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {\\pi} _ {u v} = \\mathbf {K} ^ {- 1} [ u, v, 1 ] ^ {T}, \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.34, + 0.316, + 0.354 + ], + "angle": 0, + "content": "its world direction is:" + }, + { + "type": "equation", + "bbox": [ + 0.431, + 0.354, + 0.826, + 0.371 + ], + "angle": 0, + "content": "\\[\n\\mathbf {d} _ {u v} = \\mathbf {R} _ {c} \\boldsymbol {\\pi} _ {u v} + \\mathbf {c}. \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.374, + 0.352, + 0.389 + ], + "angle": 0, + "content": "The Plücker embedding is:" + }, + { + "type": "equation", + "bbox": [ + 0.402, + 0.387, + 0.826, + 0.405 + ], + "angle": 0, + "content": "\\[\n\\mathbf {l} _ {u v} = \\left(\\mathbf {c} \\times \\mathbf {d} _ {u v}, \\mathbf {d} _ {u v}\\right) \\in \\mathbb {R} ^ {6}. \\tag {10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.407, + 0.507, + 0.423 + ], + "angle": 0, + "content": "For a frame of size \\( H \\times W \\), the full embedding is:" + }, + { + "type": "equation", + "bbox": [ + 0.442, + 0.43, + 0.826, + 0.448 + ], + "angle": 0, + "content": "\\[\n\\mathbf {L} _ {i} \\in \\mathbb {R} ^ {H \\times W \\times 6}. \\tag {11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.462, + 0.826, + 0.533 + ], + "angle": 0, + "content": "Memory context length. We evaluate how different memory context lengths affect performance in the Minecraft benchmark. Table 7 shows that increasing the context length from 1 to 8 steadily boosts PSNR, lowers LPIPS, and reduces rFID. 
However, extending the length to 16 deteriorates results, indicating that excessive memory frames may introduce noise or reduce retrieval precision. A context length of 8 provides the best trade-off, yielding the highest PSNR and the lowest LPIPS and rFID." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.538, + 0.827, + 0.678 + ], + "angle": 0, + "content": "Pose prediction. For interactive play, ground truth poses are not accessible. To address this, we designed a lightweight pose prediction module that estimates the pose of the next frame. As illustrated in Figure 9, the predictor takes the previous image, the previous pose, and the upcoming action as inputs and outputs the predicted next pose. This module enables the system to operate using actions alone, eliminating the need for ground truth poses during inference. In Table 8, we compare the performance of using predicted poses versus ground truth poses. While using ground truth poses yields better results across all metrics, the performance drop with predicted poses is acceptable. This is because our method does not rely heavily on precise pose predictions – new frames are generated based on these predictions – and the ground truth poses generated by the Minecraft simulator also contain a certain degree of randomness." + }, + { + "type": "table_caption", + "bbox": [ + 0.32, + 0.699, + 0.677, + 0.714 + ], + "angle": 0, + "content": "Table 7: Ablation on length of memory context length" + }, + { + "type": "table", + "bbox": [ + 0.36, + 0.715, + 0.64, + 0.799 + ], + "angle": 0, + "content": "
Length | PSNR ↑ | LPIPS ↓ | rFID ↓
1 | 16.18 | 0.1899 | 20.47
4 | 18.68 | 0.1568 | 16.54
8 | 19.32 | 0.1429 | 15.37
16 | 17.14 | 0.1687 | 18.33
" + }, + { + "type": "table_caption", + "bbox": [ + 0.25, + 0.829, + 0.748, + 0.844 + ], + "angle": 0, + "content": "Table 8: Comparison between using predicted poses and ground truth poses" + }, + { + "type": "table", + "bbox": [ + 0.341, + 0.845, + 0.659, + 0.902 + ], + "angle": 0, + "content": "
Pose Type | PSNR ↑ | LPIPS ↓ | rFID ↓
Ground truth | 19.32 | 0.1429 | 15.37
Predicted | 17.13 | 0.1786 | 20.36
" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.348, + 0.099, + 0.652, + 0.257 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.375, + 0.269, + 0.622, + 0.285 + ], + "angle": 0, + "content": "Figure 9: Structure of pose predictor." + }, + { + "type": "image", + "bbox": [ + 0.348, + 0.311, + 0.645, + 0.469 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.347, + 0.479, + 0.476, + 0.493 + ], + "angle": 0, + "content": "(a) Timestep embedding" + }, + { + "type": "image_caption", + "bbox": [ + 0.521, + 0.48, + 0.638, + 0.493 + ], + "angle": 0, + "content": "(b) Action embedding" + }, + { + "type": "image", + "bbox": [ + 0.349, + 0.519, + 0.645, + 0.688 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.359, + 0.697, + 0.465, + 0.71 + ], + "angle": 0, + "content": "(c) Pose embedding" + }, + { + "type": "image_caption", + "bbox": [ + 0.511, + 0.697, + 0.649, + 0.71 + ], + "angle": 0, + "content": "(d) Timestamp embedding" + }, + { + "type": "image_caption", + "bbox": [ + 0.341, + 0.725, + 0.655, + 0.74 + ], + "angle": 0, + "content": "Figure 10: Illustration of different embeddings." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.768, + 0.491, + 0.784 + ], + "angle": 0, + "content": "7.2 Memory Usage and Scalability Analysis" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.794, + 0.825, + 0.823 + ], + "angle": 0, + "content": "To assess the scalability and practical feasibility of our method, we provide detailed quantitative analysis covering memory usage, generation duration, training cost, and inference efficiency." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.839, + 0.825, + 0.869 + ], + "angle": 0, + "content": "Memory Usage of the Memory Bank. The memory bank is lightweight. Storing 600 visual memory tokens with shape [600, 16, 18, 32] in float32 takes approximately 21MB." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Retrieval Latency. Below we report the average retrieval time (for 8 memory frames) as a function of memory bank size:" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.341, + 0.098, + 0.652, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.322, + 0.271, + 0.673, + 0.287 + ], + "angle": 0, + "content": "Figure 11: Two-view FOV overlapping visualization." + }, + { + "type": "table", + "bbox": [ + 0.315, + 0.313, + 0.684, + 0.4 + ], + "angle": 0, + "content": "
Number of Memory Candidates | Retrieval Time (s)
10 | 0.04
100 | 0.06
600 | 0.10
1000 | 0.16
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.412, + 0.825, + 0.44 + ], + "angle": 0, + "content": "The generation cost (20 denoising steps) is \\(\\sim 0.9\\)s per frame. Retrieval time accounts for only \\(10 - 20\\%\\) of total inference time even with 1000 candidates." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.457, + 0.826, + 0.501 + ], + "angle": 0, + "content": "Comparison with Baseline. We compare our method with a baseline model (without memory), under consistent settings: 8 context frames, 8 memory frames, 20 denoising steps, and no acceleration techniques, on single H200." + }, + { + "type": "table", + "bbox": [ + 0.245, + 0.514, + 0.754, + 0.587 + ], + "angle": 0, + "content": "
Method | Training Mem. Usage | Training Speed (it/s) | Inference Mem. Usage | Inference Speed (it/s)
w/o Memory | 33 GB | 3.19 | 9 GB | 1.03
with Memory | 51 GB | 1.76 | 11 GB | 0.89
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.599, + 0.825, + 0.628 + ], + "angle": 0, + "content": "Adding memory introduces moderate training overhead. During inference, the impact is minimal: only a small increase in memory usage and a slight decrease in speed." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.644, + 0.826, + 0.674 + ], + "angle": 0, + "content": "Inference Optimization. With modern acceleration techniques (e.g., timestep distillation, early exit, sparse attention), inference speed can reach \\(\\sim 10\\) FPS, making our method practical for deployment." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.678, + 0.825, + 0.709 + ], + "angle": 0, + "content": "FOV Overlapping Computation. We present the details of Monte Carlo-based FOV overlapping computation in Alg. 11, as well as the two-view overlapping sampling in Figure 11." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.726, + 0.312, + 0.74 + ], + "angle": 0, + "content": "7.3 Visualizations" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.753, + 0.78, + 0.769 + ], + "angle": 0, + "content": "In this section, we provide more visualization of different aspects to facilitate understanding." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.773, + 0.825, + 0.816 + ], + "angle": 0, + "content": "Minecraft Training Examples. We present a diverse set of training environments that include various terrain types, action spaces, and weather conditions, as shown in Figure 12. These variations help enhance the model's adaptability and robustness in different scenarios." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.821, + 0.825, + 0.864 + ], + "angle": 0, + "content": "Trajectory Examples in Minecraft. Figure 13 illustrates trajectory examples in the x-z space over 100 frames. The agent's movement exhibits a random action pattern, ensuring diverse learning objectives and a broad range of sampled experiences." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Pose Distribution. We collect and visualize 800 samples within a sampling range of 8, as shown in Figure 14. The random pattern observed in Figure 14 ensures a diverse distribution of sampled poses in space, which is beneficial for learning the reasoning process within the memory blocks." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.095, + 0.731, + 0.111 + ], + "angle": 0, + "content": "Algorithm 2: Monte Carlo-based FOV Overlap Computation (Notationally Disjoint)" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.113, + 0.221, + 0.127 + ], + "angle": 0, + "content": "Input:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.125, + 0.811, + 0.155 + ], + "angle": 0, + "content": "- \\( Q_{\\mathrm{ref}} \\in \\mathbb{R}^{F \\times 5} \\): reference poses from memory bank (x,y,z,pitch,yaw), \\( F \\) is the number of stored poses." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.158, + 0.534, + 0.174 + ], + "angle": 0, + "content": "- \\(Q_{\\mathrm{tgt}} \\in \\mathbb{R}^5\\): pose of the current (target) frame." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.178, + 0.561, + 0.193 + ], + "angle": 0, + "content": "- \\(M\\): number of 3D sample points (default 10,000)." 
+ }, + { + "type": "text", + "bbox": [ + 0.217, + 0.197, + 0.549, + 0.212 + ], + "angle": 0, + "content": "- \(R\): radius of the sampling sphere (default \(30\mathrm{m}\))." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.216, + 0.629, + 0.231 + ], + "angle": 0, + "content": "- \(\phi_h\), \(\phi_v\): horizontal/vertical field-of-view angles (in degrees)." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.125, + 0.811, + 0.231 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.235, + 0.234, + 0.249 + ], + "angle": 0, + "content": "Output:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.252, + 0.736, + 0.269 + ], + "angle": 0, + "content": "- \(\rho \in \mathbb{R}^F\): overlapping ratios between each reference pose and the target pose." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.273, + 0.216, + 0.286 + ], + "angle": 0, + "content": "begin" + }, + { + "type": "title", + "bbox": [ + 0.197, + 0.285, + 0.485, + 0.299 + ], + "angle": 0, + "content": "\(\Delta\) Step 1: Random Sampling in a Sphere" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.299, + 0.591, + 0.313 + ], + "angle": 0, + "content": "Generate \(M\) points \(\mathbf{q}\) uniformly in a 3D sphere of radius \(R\):" + }, + { + "type": "equation", + "bbox": [ + 0.397, + 0.32, + 0.608, + 0.337 + ], + "angle": 0, + "content": "\[\n\mathbf{q} \leftarrow \operatorname{PointSampling}(M, R).\n\]" + }, + { + "type": "title", + "bbox": [ + 0.196, + 0.343, + 0.509, + 0.359 + ], + "angle": 0, + "content": "\(\Delta\) Step 2: Translate Points to \(Q_{\mathrm{tgt}}\) as Center" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.358, + 0.802, + 0.373 + ], + "angle": 0, + "content": "Let \( Q_{\mathrm{tgt}}(x,y,z) \) be the 3D coordinates of the current camera pose. Shift all sampled points:" + }, + { + "type": "equation", + "bbox": [ + 0.418, + 0.38, + 0.586, + 0.396 + ], + "angle": 0, + "content": "\[\n\mathbf{q} \leftarrow \mathbf{q} + Q_{\mathrm{tgt}}(x, y, z).\n\]" + }, + { + "type": "title", + "bbox": [ + 0.197, + 0.402, + 0.361, + 0.417 + ], + "angle": 0, + "content": "\(\Delta\) Step 3: FOV Checks" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.415, + 0.801, + 0.446 + ], + "angle": 0, + "content": "Compute a boolean matrix \(\mathbf{v}_{\mathrm{ref}} \in \{0,1\}^{F \times M}\), where each entry indicates if a point in \(\mathbf{q}\) lies in the FOV of a reference pose:" + }, + { + "type": "equation", + "bbox": [ + 0.363, + 0.452, + 0.642, + 0.479 + ], + "angle": 0, + "content": "\[\n\mathbf{v}_{\mathrm{ref}} \leftarrow \operatorname{IsInsideFOV}\big(\mathbf{q}, Q_{\mathrm{ref}}, \phi_{h}, \phi_{v}\big).\n\]" + }, + { + "type": "text", + "bbox": [ + 0.205, + 0.485, + 0.678, + 0.503 + ], + "angle": 0, + "content": "Similarly, compute a boolean vector \(\mathbf{v}_{\mathrm{tgt}} \in \{0,1\}^{M}\) for the target pose:" + }, + { + "type": "equation", + "bbox": [ + 0.363, + 0.509, + 0.642, + 0.536 + ], + "angle": 0, + "content": "\[\n\mathbf{v}_{\mathrm{tgt}} \leftarrow \operatorname{IsInsideFOV}\big(\mathbf{q}, Q_{\mathrm{tgt}}, \phi_{h}, \phi_{v}\big).\n\]" + }, + { + "type": "title", + "bbox": [ + 0.197, + 0.541, + 0.498, + 0.556 + ], + "angle": 0, + "content": "\(\Delta\) Step 4: Overlapping Ratio Computation" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.555, + 0.796, + 0.571 + ], + "angle": 0, + "content": "Obtain the final overlapping ratio vector \(\boldsymbol{\rho} \in \mathbb{R}^{F}\) by combining \(\mathbf{v}_{\mathrm{ref}}\) and \(\mathbf{v}_{\mathrm{tgt}}\). For instance," + }, + { + "type": "equation", + "bbox": [ + 0.382, + 0.579, + 0.623, + 0.622 + ], + "angle": 0, + "content": "\[\n\boldsymbol{\rho}[i] = \frac{1}{M} \sum_{j=1}^{M} \left(\mathbf{v}_{\mathrm{ref}}[i, j] \cdot \mathbf{v}_{\mathrm{tgt}}[j]\right),\n\]" + }, + { + "type": "text", + "bbox": [ + 0.203, + 0.628, + 0.801, + 0.657 + ], + "angle": 0, + "content": "to measure the fraction of sampled points that are visible in both the \(i\)-th reference pose and the target pose." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.657, + 0.27, + 0.671 + ], + "angle": 0, + "content": "Return \(\rho\)" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.67, + 0.203, + 0.682 + ], + "angle": 0, + "content": "end" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.718, + 0.825, + 0.747 + ], + "angle": 0, + "content": "More Qualitative Results. For additional qualitative examples, we recommend consulting the attached web page, which offers enhanced visualizations."
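Algorithm 2 above translates almost line-for-line into NumPy. The sketch below is an illustration under stated assumptions, not the released implementation: the IsInsideFOV test assumes a y-up, yaw-about-y axis convention, and the default FOV angles are placeholders, since the paper leaves them unspecified.

```python
import numpy as np

def is_inside_fov(q, pose, phi_h, phi_v):
    # q: (M, 3) sample points; pose: (x, y, z, pitch, yaw) in degrees.
    # Assumed axis convention: y up, yaw measured about the y axis.
    d = q - pose[:3]
    yaw = np.degrees(np.arctan2(d[:, 0], d[:, 2]))
    pitch = np.degrees(np.arctan2(d[:, 1], np.linalg.norm(d[:, [0, 2]], axis=1)))
    d_yaw = (yaw - pose[4] + 180.0) % 360.0 - 180.0  # wrap yaw offset to [-180, 180)
    return (np.abs(d_yaw) < phi_h / 2) & (np.abs(pitch - pose[3]) < phi_v / 2)

def fov_overlap(q_ref, q_tgt, M=10_000, R=30.0, phi_h=70.0, phi_v=50.0):
    # q_ref: (F, 5) reference poses; q_tgt: (5,) target pose. Returns rho: (F,).
    # Step 1: M points sampled uniformly inside a sphere of radius R.
    pts = np.random.randn(M, 3)
    pts *= R * np.random.rand(M, 1) ** (1 / 3) / np.linalg.norm(pts, axis=1, keepdims=True)
    # Step 2: recentre the sphere on the target camera position.
    pts = pts + q_tgt[:3]
    # Step 3: visibility of every sample from each reference pose and the target.
    v_ref = np.stack([is_inside_fov(pts, p, phi_h, phi_v) for p in q_ref])  # (F, M)
    v_tgt = is_inside_fov(pts, q_tgt, phi_h, phi_v)                         # (M,)
    # Step 4: fraction of samples visible in both views.
    return (v_ref & v_tgt).mean(axis=1)
```

The resulting overlap scores are what the retrieval of Algorithm 1 in the main paper consumes. A hypothetical sketch of the confidence-plus-similarity filtering ablated in Table 3, with invented names and the reported similarity threshold tr = 0.9, could look like:

```python
import torch
import torch.nn.functional as F

def retrieve_memory(overlap, features, k=8, tr=0.9):
    # overlap: (N,) FOV-overlap score per memory unit; features: (N, D) visual features.
    order = torch.argsort(overlap, descending=True).tolist()  # confidence filter
    feats = F.normalize(features, dim=-1)
    selected = []
    for i in order:
        # Similarity filter: drop units nearly identical to one already selected.
        if selected and (feats[i] @ feats[selected].T).max() > tr:
            continue
        selected.append(i)
        if len(selected) == k:  # an 8-frame memory window, as in the experiments
            break
    return selected
```

With \(M = 10{,}000\) samples the Monte Carlo estimate stays cheap, which is consistent with the sub-second retrieval times reported in Section 7.2.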
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.18, + 0.12, + 0.338, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.12, + 0.498, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.12, + 0.659, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.12, + 0.82, + 0.19 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.181, + 0.192, + 0.338, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.192, + 0.498, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.192, + 0.659, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.192, + 0.82, + 0.262 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.181, + 0.264, + 0.338, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.264, + 0.498, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.264, + 0.659, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.264, + 0.82, + 0.333 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.181, + 0.335, + 0.338, + 0.404 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.335, + 0.498, + 0.404 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.335, + 0.659, + 0.404 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.335, + 0.82, + 0.404 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.181, + 0.407, + 0.338, + 0.475 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.407, + 0.498, + 0.475 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.407, + 0.659, + 0.475 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.406, + 0.82, + 0.475 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.172, + 0.488, + 0.828, + 0.517 + ], + "angle": 0, + "content": "Figure 12: Training Examples. Our training environments encompass diverse terrains, action spaces, and weather conditions, providing a comprehensive setting for learning." + }, + { + "type": "image", + "bbox": [ + 0.275, + 0.581, + 0.72, + 0.841 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.851, + 0.825, + 0.88 + ], + "angle": 0, + "content": "Figure 13: Visualization of Trajectory Examples in the X-Z Space. The axis scales represent distances within the Minecraft environment." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.276, + 0.318, + 0.71, + 0.64 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.172, + 0.65, + 0.825, + 0.679 + ], + "angle": 0, + "content": "Figure 14: Visualization of Relative Pose Distribution for Training in X-Z Space. Red dots indicate positions, while yellow arrows represent directions." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "20" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12369/3a2c027d-7926-4954-8a0d-d286f9d6a3ea_origin.pdf b/data/2025/2504_12xxx/2504.12369/3a2c027d-7926-4954-8a0d-d286f9d6a3ea_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..e35380b6493e2c59637bbf0438cae8b1cca98f11 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/3a2c027d-7926-4954-8a0d-d286f9d6a3ea_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b9c02e5159ae54c2c8b60b312b18b1999800a44d25455c6fae804aa2e705d5d +size 4597473 diff --git a/data/2025/2504_12xxx/2504.12369/full.md b/data/2025/2504_12xxx/2504.12369/full.md new file mode 100644 index 0000000000000000000000000000000000000000..85feb4f163d5ecfdf5060943302d87bbb3d1e74f --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/full.md @@ -0,0 +1,542 @@ +# WORLDMEM: Long-term Consistent World Simulation with Memory + +Zeqi Xiao $^{1}$ Yushi Lan $^{1}$ Yifan Zhou $^{1}$ Wenqi Ouyang $^{1}$ Shuai Yang $^{2}$ Yanhong Zeng $^{3}$ Xingang Pan $^{1}$ + +$^{1}$ S-Lab, Nanyang Technological University, + +$^{2}$ Wangxuan Institute of Computer Technology, Peking University + +3Shanghai AI Laboratory + +{zeqi001, yushi001, yifan006, wenqi.ouyang, xingang.pan}@ntu.edu.sg + +williamyang@pku.edu.cn, zengyh1900@gmail.com + +# Abstract + +World simulation has gained increasing popularity due to its ability to model virtual environments and predict the consequences of actions. However, the limited temporal context window often leads to failures in maintaining long-term consistency, particularly in preserving 3D spatial consistency. In this work, we present WOrLD-MEM, a framework that enhances scene generation with a memory bank consisting of memory units that store memory frames and states (e.g., poses and timestamps). By employing state-aware memory attention that effectively extracts relevant information from these memory frames based on their states, our method is capable of accurately reconstructing previously observed scenes, even under significant viewpoint or temporal gaps. Furthermore, by incorporating timestamps into the states, our framework not only models a static world but also captures its dynamic evolution over time, enabling both perception and interaction within the simulated world. Extensive experiments in both virtual and real scenarios validate the effectiveness of our approach. Project page at https://xizaoqu.github.io/worldmem. + +# 1 Introduction + +World simulation has gained significant attention for its ability to model environments and predict the outcomes of actions (Bar et al., 2024; Decart et al., 2024; Alonso et al., 2025; Feng et al., 2024; Parker-Holder et al., 2024; Valevski et al., 2024). Recent advances in video diffusion models have further propelled this field, enabling high-fidelity rollouts of potential future scenarios based on user actions, such as navigating through an environment or interacting with objects. These capabilities make world simulators particularly promising for applications in autonomous navigation (Feng et al., 2024; Bar et al., 2024) and as viable alternatives to traditional game engines (Decart et al., 2024; Parker-Holder et al., 2024). + +Despite these advances, a fundamental challenge remains: the limited probing horizon. 
Due to computational and memory constraints, video generative models operate within a fixed context window and are unable to condition on the full sequence of past generations. Consequently, most existing methods simply discard previously generated content, leading to a critical issue of world inconsistency, which is also revealed in Wang et al. (2025). As illustrated in Figure 1(a), when the camera moves away and returns, the regenerated content diverges from the earlier scene, violating the coherence expected in a consistent world. + +A natural solution is to maintain an external memory that stores and retrieves relevant historical information outside the generative loop. While intuitive, formulating such a memory mechanism is + +![](images/2cedaf771a3bc9c255e1950c8a7a8826919dba3fb6d4f8b211d37dc47c3d69f4.jpg) +Figure 1: WORLDMEM enables long-term consistent world generation with an integrated memory mechanism. (a) Previous world generation methods typically face the problem of inconsistent world due to limited temporal context window size. (b) WORLDMEM empowers the agent to explore diverse and consistent worlds with an expansive action space, e.g., crafting environments by placing objects like pumpkin light or freely roaming around. Most importantly, after exploring for a while and glancing back, we find the objects we placed are still there, with the inspiring sight of the light melting the surrounding snow, testifying to the passage of time. Red and green boxes indicate scenes that should be consistent. + +non-trivial. A direct approach might involve explicit 3D scene reconstruction to preserve geometry and detail. However, 3D representations are inflexible in dynamic and evolving environments and are prone to loss of detail, especially for large, unbounded scenes (Wu et al., 2025a). + +Instead, we argue that geometry-free representations offer a more flexible solution. These representations, however, pose their own challenges – particularly in balancing detail retention with memory scalability. For example, implicit approaches like storing abstract features via LoRA modules (Hong et al., 2024) offer compactness but lose visual fidelity and spatial specificity. Some recent works represent visual scenes as discrete tokens encoding fine-grained visual information (Sajjadi et al., 2022; Jiang et al., 2025), but they are limited by a fixed token and struggle to capture the complexity of diverse and evolving environments. To address this issue, we observe that for generating the immediate future, only a small subset of historical content is typically relevant. Based on this, we propose a token-level memory bank that stores all previously generated latent tokens, and retrieves a targeted subset for each generation step based on relevance. + +Conditioning on the retrieved memory requires spatial-temporal reasoning. In contrast to prior work where memory aids local temporal smoothness (Zheng et al., 2024a) or semantic coherence (Wu et al., 2025b; Rahman et al., 2023), long-term world simulation demands reasoning over large spatiotemporal gaps, e.g., memory and query may differ in viewpoint and time, and retain exact scenes with detail. To facilitate this reasoning, we propose augmenting each memory unit with explicit state cues, including spatial location, viewpoint, and timestamp. These cues serve as anchors for reasoning and are embedded as part of the query-key attention mechanism. 
Through this state-aware attention, our model can effectively reason the current frame with past observations, facilitating accurate and coherent generation. Importantly, such a design leverages standard attention architectures, enabling it to scale naturally with modern hardware and model capacity. + +Motivated by this idea, we build our approach, WOrLDMEM, on top of the Conditional Diffusion Transformer (CDiT) (Peebles and Xie, 2023) and the Diffusion Forcing (DF) paradigm (Chen et al., 2025), which autoregressively generates first-person viewpoints conditioned on external action signals. As discussed above, at the core of WOrLDMEM is a memory mechanism composed of a memory bank and memory attention. To ensure efficient and relevant memory retrieval from the bank, we introduce a confidence-based selection strategy that scores memory units based on field-of-view + +(FOV) overlap and temporal proximity. In the memory attention, the latent tokens being generated act as queries, attending to the memory tokens (as keys and values) to incorporate relevant historical context. To ensure robust correspondence across varying viewpoints and time gaps, we enrich both queries and keys with state-aware embeddings. A relative embedding design is introduced to ease the learning of spatial and temporal relationships. This pipeline enables precise, scalable reasoning over long-range memory, ensuring consistency in dynamic and evolving world simulations. + +We evaluate WOrLDMEM on a customized Minecraft benchmark (Fan et al., 2022) and on RealEstate10K (Zhou et al., 2018). The Minecraft benchmark includes diverse terrains (e.g., plains, savannas, and deserts) and various action modalities (movement, viewpoint control, and event triggers), which is a wonderful environment for idea verification. Extensive experiments show that WOrLDMEM significantly improves 3D spatial consistency, enabling robust viewpoint reasoning and high-fidelity scene generation, as shown in Figure 1(b). Furthermore, in dynamic environments, WOrLDMEM accurately tracks and follows evolving events and environment changes, demonstrating its ability to both perceive and interact with the generated world. We hope our promising results and scalable designs will inspire future research on memory-based world simulation. + +# 2 Related Work + +Video diffusion model. With the rapid advancement of diffusion models (Song et al., 2020; Peebles and Xie, 2023; Chen et al., 2025), video generation has made significant strides (Wang et al., 2023a,b; Chen et al., 2023; Guo et al., 2023; OpenAI, 2024; Jin et al., 2024; Yin et al., 2024). The field has evolved from traditional U-Net-based architectures (Wang et al., 2023a; Chen et al., 2023; Guo et al., 2023) to Transformer-based frameworks (OpenAI, 2024; Ma et al., 2024; Zheng et al., 2024b), enabling video diffusion models to generate highly realistic and temporally coherent videos. Recently, autoregressive video generation (Chen et al., 2025; Kim et al., 2024; Henschel et al., 2024) has emerged as a promising approach to extend video length, theoretically indefinitely. Notably, Diffusion Forcing (Chen et al., 2025) introduces a per-frame noise-level denoising paradigm. Unlike the full-sequence paradigm, which applies a uniform noise level across all frames, per-frame noise-level denoising offers a more flexible approach, enabling autoregressive generation. + +Interactive world simulation. World simulation aims to model an environment by predicting the next state given the current state and action. 
This concept has been extensively explored in the construction of world models (Ha and Schmidhuber, 2018b) for agent learning (Ha and Schmidhuber, 2018a; Hafner et al., 2019, 2020; Hu et al., 2023; Beattie et al., 2016; Yang et al., 2023). With advances in video generation, high-quality world simulation with robust control has become feasible, leading to numerous works focusing on interactive world simulation (Bar et al., 2024; Decart et al., 2024; Alonso et al., 2025; Feng et al., 2024; Parker-Holder et al., 2024; Valevski et al., 2024; Yu et al., 2025c,a,b). These approaches enable agents to navigate generated environments and interact with them based on external commands. + +However, due to context window limitations, such methods discard previously generated content, leading to inconsistencies in the simulated world, particularly in maintaining 3D spatial coherence. + +Consistent world simulation. Ensuring the consistency of a generated world is crucial for effective world simulation Wang et al. (2025). Existing approaches can be broadly categorized into two types: geometric-based and geometric-free. The geometric-based methods explicitly reconstruct the generated world into a 3D/4D representation (Liu et al., 2024; Gao et al., 2024; Wang and Agapito, 2024; Ren et al., 2025; Yu et al., 2024b,a; Liang et al., 2024). While this strategy can reliably maintain consistency, it imposes strict constraints on flexibility: Once the world is reconstructed, modifying or interacting with it becomes challenging. Geometric-free methods focus on implicit learning. Methods like Alonso et al. (2025); Valevski et al. (2024) ensure consistency by overfitting to predefined scenarios (e.g., specific CS:GO or DOOM maps), limiting scalability. StreamingT2V (Henschel et al., 2024) maintains long-term consistency by continuing on both global and local visual contexts from previous frames, while SlowFastGen (Hong et al., 2024) progressively trains LoRA (Hu et al., 2022) modules for memory recall. However, these methods rely on abstract representations, making accurate scene reconstruction challenging. In contrast, our approach retrieves information from previously generated frames and their states, ensuring world consistency without overfitting to specific scenarios. + +![](images/17f283519eda9a5331b73da78c30e9f49bf3b0344d40c5194698866ef6a8043e.jpg) + +![](images/710060b8d65f17b785353128df68a37c04d5ccfe3c20236be522f6805024dbe3.jpg) + +![](images/a94869f851f3a9f0a5887da9940203db5f53e43246aa8bb56a01ceb394a21328.jpg) +(c) State Embedding + +![](images/0eaba4c9b0918d5cb17309e5aac57ca03240e9a5a335a15ff79f2279e7e8be2c.jpg) +(b) Input Difference +(d) Memory Block +Figure 2: Comprehensive overview of WOrLDMEM. The framework comprises a conditional diffusion transformer integrated with memory blocks, with a dedicated memory bank storing memory units from previously generated content. By retrieving these memory units from the memory bank and incorporating the information by memory blocks to guide generation, our approach ensures long-term consistency in world simulation. + +# 3 WORLDMEM + +This section details the methodology of WOrLDMEM. Sec. 3.1 introduces the relevant preliminaries, while Sec. 3.2 describes the interactive world simulator serving as our baseline. Sec. 3.3 and 3.4 present the core of our proposed memory mechanism. + +# 3.1 Preliminary + +Video diffusion models. 
Video diffusion models generate video sequences by iteratively denoising Gaussian noise through a learned reverse process:

$$
p_{\theta}\left(\mathbf{x}_{t}^{k-1} \mid \mathbf{x}_{t}^{k}\right) = \mathcal{N}\left(\mathbf{x}_{t}^{k-1}; \mu_{\theta}\left(\mathbf{x}_{t}^{k}, k\right), \sigma_{k}^{2} \mathbf{I}\right), \tag{1}
$$

where all frames $(\mathbf{x}_t^k)_{1\leq t\leq T}$ share the same noise level $k$ and $T$ is the context window length. This full-sequence approach enables global guidance but lacks flexibility in sequence length and autoregressive generation.

Autoregressive video generation. Autoregressive video generation aims to extend videos over the long term by predicting frames sequentially (Kondratyuk et al., 2024; Wu et al., 2023). While various methods exist for autoregressive generation, Diffusion Forcing (DF) (Chen et al., 2025) provides a neat and effective approach to achieve this. Specifically, DF introduces per-frame noise levels $k_{t}$:

$$
p_{\theta}\left(\mathbf{x}_{t}^{k_{t}-1} \mid \mathbf{x}_{t}^{k_{t}}\right) = \mathcal{N}\left(\mathbf{x}_{t}^{k_{t}-1}; \mu_{\theta}\left(\mathbf{x}_{t}^{k_{t}}, k_{t}\right), \sigma_{k_{t}}^{2} \mathbf{I}\right). \tag{2}
$$

Unlike full-sequence diffusion, DF generates video flexibly and stably beyond the training horizon. Autoregressive generation is the special case in which only the last one or a few frames are noisy. With autoregressive video generation, long-term interactive world simulation becomes feasible.

# 3.2 Interactive World Simulation

Before introducing the memory mechanism, we first present our interactive world simulator, which models long video sequences using an autoregressive conditional diffusion transformer. Interaction is achieved by embedding external control signals, primarily actions, into the model through dedicated conditioning modules (Parker-Holder et al., 2024; Decart et al., 2024; Yu et al., 2025c).

Following prior work (Decart et al., 2024), we adopt a conditional Diffusion Transformer (DiT) (Peebles and Xie, 2023) architecture for video generation and Diffusion Forcing (DF) (Chen et al., 2025) for autoregressive prediction. As shown in Figure 2(a), our model consists of multiple DiT blocks with spatial and temporal modules for spatiotemporal reasoning. The temporal module applies causal attention to ensure that each frame only attends to preceding frames.

Actions are injected by first projecting them into the embedding space using a multi-layer perceptron (MLP). The resulting action embeddings are added to the denoising timestep embeddings and injected into the temporal blocks using Adaptive Layer Normalization (AdaLN) (Xu et al., 2019), following the paradigm of Bar et al. (2024); Decart et al. (2024). In our Minecraft experiments, the action space contains 25 dimensions, including movements, view adjustments, and event triggers. We also apply timestep embeddings to the spatial blocks in the same manner, although this is omitted from the figure for clarity. Standard architectural components such as residual connections, multi-head attention, and feedforward networks are also not shown.

The combination of conditional DiT and DF provides a strong baseline for long-term interactive video generation. However, due to the computational cost of video synthesis, the temporal context window remains limited.
As a result, content outside this window is forgotten, which leads to inconsistencies during long-term generation (Decart et al., 2024). + +# 3.3 Memory Representation and Retrieval + +To address the limited context window of video generative models, we introduce a memory mechanism that enables the model to retain and retrieve information beyond the current generation window. This mechanism maintains a memory bank composed of historical frames and their associated state information: $\{(\mathbf{x}_i^m,\mathbf{p}_i,t_i)\}_{i = 1}^N$ where $\mathbf{x}_i^m$ denotes a memory frame, $\mathbf{p}_i\in \mathbb{R}^5$ (x,y,z, pitch, yaw) is its pose, and $t_i$ is the timestamp. Each tuple is referred to as a memory unit. We save $\mathbf{m}_i$ in token-level, which is compressed by the visual encoder but retains enough details for reconstruction. The corresponding states $\{(\mathbf{p},t)\}$ play a critical role not only in memory retrieval but also in enabling state-aware memory conditioning. + +# Algorithm 1: Memory Retrieval Algorithm + +Input: Memory bank of $N$ historical states $\{(\mathbf{x}_i^m,\mathbf{p}_i,t_i)\}_{i = 1}^N;$ + +Current state $(\mathbf{x}_c,\mathbf{p}_c,t_c)$ ; memory condition length $L_{M}$ + +Similarity threshold $tr$ ; weights $w_{o}$ , $w_{t}$ . + +Output: A list of selected state indices $S$ + +Compute Confidence Score: + +Compute FOV overlap ratio o via Monte Carlo sampling. + +Compute time difference $\mathbf{d} = \mathrm{Concat}\big(\{|t_i - t_c|\}_{i = 1}^n\big)$ + +Compute confidence $\alpha = \mathbf{o}\cdot w_{o} - \mathbf{d}\cdot w_{t}$ + +Selection with Similarity Filtering: + +Initialize $S = \varnothing$ + +for $m = 1$ to $L_{M}$ do + +Select $i^{*}$ with highest $\alpha_{i^{*}}$ + +Append $i^{*}$ to $S$ + +Remove all $j$ where similarity $(i^{*},j) > tr$ + +return $S$ + +Memory Retrieval. Since the number of memory frames available for conditioning is limited, an efficient strategy is required to sample memory units from the memory bank. We adopt a greedy matching algorithm based on frame-pair similarity, where similarity is defined using the field-of-view (FOV) overlap ratio and timestamp differences as confidence measures. Algorithm 1 presents our approach to memory retrieval. Although simple, this strategy proves effective in retrieving relevant information for conditioning. Moreover, the model's reasoning over memory helps maintain performance even when the retrieved content is imperfect. + +# 3.4 State-aware Memory Condition + +After retrieving necessary memory units, unlike prior methods that use memory mainly for temporal smoothness (Zheng et al., 2024a) or semantic guidance (Wu et al., 2025b; Rahman et al., 2023), our goal is to explicitly reconstruct previously seen visual content – even under significant viewpoint or scene changes. This requires the model to perform spatiotemporal reasoning to extract relevant information from memory, which we model using cross-attention (Vaswani et al., 2017). Since relying solely on visual tokens can be ambiguous, we incorporate the corresponding states as cues to enable state-aware attention. + +State Embedding. State embedding provides essential spatial and temporal context for memory retrieval. To encode spatial information, we adopt Plücker embedding (Sitzmann et al., 2021) to convert 5D poses $\mathbf{p} \in \mathbb{R}^5$ into dense positional features $\mathrm{PE}(\mathbf{p}) \in \mathbb{R}^{h \times w \times 6}$ , following (He et al., 2024; Gao et al., 2024). 
Temporal context is captured via a lightweight MLP over sinusoidally embedded $(SE)$ timestamps. The final embedding is (Figure 2(c)):

$$
\mathbf{E} = G_{p}(\mathrm{PE}(\mathbf{p})) + G_{t}(\mathrm{SE}(t)), \tag{3}
$$

where $G_{p}$ and $G_{t}$ are MLPs mapping pose and time into a shared space.

![](images/767f4bcd7f8825e3ca7df0605b4a362e6098d0785328a13ad2ac10801d30be44.jpg)
Figure 3: Qualitative results. We showcase WORLDMEM's capabilities through two sets of examples. Top: A comparison with Ground Truth (GT). WORLDMEM accurately models diverse dynamics (e.g., rain) by conditioning on 600 past frames, ensuring temporal consistency. Bottom: Interaction with the world. Objects like hay in the desert or wheat in the plains persist over time, with wheat visibly growing. For the best experience, see the supplementary videos.

State-aware Memory Attention. To support reconstruction under viewpoint and temporal shifts, we introduce a state-aware attention mechanism that incorporates spatial-temporal cues into memory retrieval. By conditioning attention on both visual features and state information, the model achieves more accurate reasoning between the input and the memory.

Let $\mathbf{X}_q\in \mathbb{R}^{l_q\times d}$ denote the flattened feature map of the input frames (queries), and $\mathbf{X}_k\in \mathbb{R}^{l_k\times d}$ the concatenated memory features (keys and values). We first enrich both with their corresponding state embeddings $\mathbf{E}_q$ and $\mathbf{E}_k$:

$$
\tilde{\mathbf{X}}_{q} = \mathbf{X}_{q} + \mathbf{E}_{q}, \quad \tilde{\mathbf{X}}_{k} = \mathbf{X}_{k} + \mathbf{E}_{k}. \tag{4}
$$

Cross-attention is then applied to retrieve relevant memory content and output the updated features $\mathbf{X}^{\prime}$:

$$
\mathbf{X}^{\prime} = \operatorname{CrossAttn}(Q = p_{q}(\tilde{\mathbf{X}}_{q}), K = p_{k}(\tilde{\mathbf{X}}_{k}), V = p_{v}(\mathbf{X}_{k})), \tag{5}
$$

where $p_{q}$, $p_{k}$, and $p_{v}$ are learnable projections.

To simplify the reasoning space, we adopt a relative state formulation. For each query frame, the state is set to a zero reference (e.g., the pose is reset to the identity and the timestamp to zero), while the states of the key frames are normalized to relative values. This design, illustrated in Figure 2(d), improves alignment under viewpoint changes and simplifies the learning objective.

![](images/55568ec2d9052a84d2f43f5fd983fa65c403765847b1eb321dd4a5371fac8f43.jpg)
Figure 4: Within context window evaluation. The motion sequence involves turning right and returning to the original position, showing self-contained consistency.

Table 1: Evaluation on Minecraft

**Within context window**

| Methods | PSNR ↑ | LPIPS ↓ | rFID ↓ |
| --- | --- | --- | --- |
| Full Seq. | 14.35 | 0.0691 | 13.87 |
| DF | 20.56 | 0.0094 | 13.88 |
| Ours | 21.01 | 0.0072 | 13.73 |

**Beyond context window**

| Methods | PSNR ↑ | LPIPS ↓ | rFID ↓ |
| --- | --- | --- | --- |
| Full Seq. | / | / | / |
| DF | 18.04 | 0.4376 | 51.28 |
| Ours | 19.32 | 0.1429 | 15.37 |
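
To make the state-aware memory attention of Eqs. (4) and (5) concrete, below is a minimal single-head PyTorch sketch. The dimensions, toy shapes, and single-head form are illustrative assumptions (the actual memory blocks use multi-head attention inside a DiT); only the structure follows the equations: state embeddings are added to queries and keys, while values are projected from the raw memory tokens.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class StateAwareMemoryAttention(nn.Module):
    """Single-head sketch of Eqs. (4)-(5)."""

    def __init__(self, dim: int):
        super().__init__()
        self.p_q = nn.Linear(dim, dim)  # learnable projections p_q, p_k, p_v
        self.p_k = nn.Linear(dim, dim)
        self.p_v = nn.Linear(dim, dim)

    def forward(self, x_q, e_q, x_k, e_k):
        q = self.p_q(x_q + e_q)  # Eq. (4): queries enriched with states
        k = self.p_k(x_k + e_k)  # Eq. (4): keys enriched with states
        v = self.p_v(x_k)        # values come from the raw memory tokens
        attn = F.softmax(q @ k.t() / q.shape[-1] ** 0.5, dim=-1)
        return attn @ v          # Eq. (5): retrieved memory content


# Toy usage: 144 query tokens attend to 8 memory frames of 144 tokens each.
d = 64
block = StateAwareMemoryAttention(d)
x_q, e_q = torch.randn(144, d), torch.randn(144, d)
x_k, e_k = torch.randn(8 * 144, d), torch.randn(8 * 144, d)
print(block(x_q, e_q, x_k, e_k).shape)  # torch.Size([144, 64])
```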
+ +![](images/d1e6c276910048364854297556611dd2a45a1eea429cab3c84376a57362243d5.jpg) +Figure 5: Beyond context window evaluation. Diffusion-Forcing suffers inconsistency over time, while ours maintains quality and recovers past scenes. + +Table 2: Ablation on embedding designs + +

| Pose type | Embed. type | PSNR ↑ | LPIPS ↓ | rFID ↓ |
| --- | --- | --- | --- | --- |
| Sparse | Absolute | 14.67 | 0.2887 | 39.23 |
| Dense | Absolute | 17.63 | 0.1830 | 29.34 |
| Dense | Relative | 19.32 | 0.1429 | 15.37 |
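
As a companion to the dense rows of Table 2, the numpy sketch below expands a 5D pose into a per-pixel Plücker ray map; the exact formulation is given as Eqs. (8)-(11) in the supplementary. The intrinsics, axis conventions, and ray normalization here are illustrative assumptions, and the relative variant simply re-expresses key poses in the query camera frame before embedding.

```python
import numpy as np


def rotation(pitch: float, yaw: float) -> np.ndarray:
    """R_c = R_y(yaw) @ R_x(pitch), angles in radians (assumed convention)."""
    cp, sp, cy, sy = np.cos(pitch), np.sin(pitch), np.cos(yaw), np.sin(yaw)
    r_x = np.array([[1, 0, 0], [0, cp, -sp], [0, sp, cp]])
    r_y = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
    return r_y @ r_x


def pluecker_embedding(pose, K, h, w):
    """Expand a 5D pose (x, y, z, pitch, yaw) into an (h, w, 6) ray map."""
    x, y, z, pitch, yaw = pose
    c = np.array([x, y, z])
    # One ray per pixel: back-project the pixel grid, rotate into the world.
    u, v = np.meshgrid(np.arange(w) + 0.5, np.arange(h) + 0.5)
    pix = np.stack([u, v, np.ones_like(u)], axis=-1)        # (h, w, 3)
    d = pix @ np.linalg.inv(K).T @ rotation(pitch, yaw).T   # world directions
    d /= np.linalg.norm(d, axis=-1, keepdims=True)
    m = np.cross(c, d)                                      # moments c x d
    return np.concatenate([m, d], axis=-1)                  # (h, w, 6)


# Toy usage at the 32x18 latent resolution used in the experiments.
K = np.array([[50.0, 0.0, 16.0], [0.0, 50.0, 9.0], [0.0, 0.0, 1.0]])
print(pluecker_embedding((3.0, 1.5, -2.0, 0.1, 0.8), K, h=18, w=32).shape)
```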
+ +Table 3: Ablation on memory retrieve strategy + +

| Strategy | PSNR ↑ | LPIPS ↓ | rFID ↓ |
| --- | --- | --- | --- |
| Random | 12.32 | 0.3224 | 47.35 |
| + Confidence Filter | 17.12 | 0.1863 | 24.33 |
| + Similarity Filter | 19.32 | 0.1429 | 15.37 |
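
The retrieval variants ablated in Table 3 correspond to Algorithm 1. Below is a compact sketch of that greedy procedure, assuming the FOV overlap scores and a pairwise similarity matrix between memory units have been precomputed; the default weights are placeholders (the experiments use $w_o = 1$ and $w_t = 0.2 / t_c$).

```python
import numpy as np


def retrieve_memory(overlap, t_mem, t_cur, pair_sim, l_m,
                    w_o=1.0, w_t=0.2, tr=0.9):
    """Greedy confidence-based selection with similarity filtering."""
    # Confidence: reward FOV overlap, penalize temporal distance.
    conf = overlap * w_o - np.abs(t_mem - t_cur) * w_t
    selected, alive = [], np.ones(len(conf), dtype=bool)
    for _ in range(l_m):
        if not alive.any():
            break
        best = int(np.where(alive, conf, -np.inf).argmax())
        selected.append(best)
        # Suppress the pick and every unit too similar to it.
        alive &= pair_sim[best] <= tr
        alive[best] = False
    return selected


# Toy usage with a random bank of 20 memory units.
rng = np.random.default_rng(0)
overlap, t_mem = rng.random(20), rng.random(20) * 100
print(retrieve_memory(overlap, t_mem, 100.0, rng.random((20, 20)), l_m=8))
```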
+ +Incorporating memory into pipeline. We incorporate memory frames into the pipeline by treating them as clean inputs during both training and inference. As shown in Figure 2 (a-b), during training, memory frames are assigned the lowest noise level $k_{\mathrm{min}}$ , while context window frames receive independently sampled noise levels from the range $[k_{\mathrm{min}}, k_{\mathrm{max}}]$ . During inference, both memory and context frames are assigned $k_{\mathrm{min}}$ , while the current generating frames are assigned $k_{\mathrm{max}}$ . + +To restrict memory influence only to memory blocks, we apply a temporal attention mask: + +$$ +A _ {\text {m a s k}} (i, j) = \left\{ \begin{array}{l l} 1, & i \leq L _ {M} \text {a n d} j = i \\ 1, & i > L _ {M} \text {a n d} j \leq i \\ 0, & \text {o t h e r w i s e} \end{array} \right. \tag {6} +$$ + +where $L_{M}$ is the number of memory frames that are appended before frames within the context window. This guarantees causal attention while preventing memory units from affecting each other. + +# 4 Experiments + +Datasets. We use MineDojo (Fan et al., 2022) to create diverse training and evaluation datasets in Minecraft, configuring diverse environments (e.g., plains, savannas, ice plains, and deserts), agent actions, and interactions. For real-world scenes, we utilize RealEstate10K (Zhou et al., 2018) with camera pose annotations to evaluate long-term world consistency. + +Metrics. For quantitative evaluation, we employ reconstruction metrics, where the method of obtaining ground truth (GT) varies by specific settings. We then assess the consistency and quality of the generated videos using PSNR, LPIPS (Zhang et al., 2018), and reconstruction FID (rFID) (Heusel et al., 2017), which collectively measure pixel-level fidelity, perceptual similarity, and overall realism. + +Experimental details. For our experiments on Minecraft (Fan et al., 2022), we utilize the Oasis (Decart et al., 2024) as the base model. Our model is trained using the Adam optimizer with a fixed + +![](images/80be7710b7aac22f2f910ef78e2582ba42b65a4d9eacce9bebbb6f7e2b7ed9dd.jpg) +Figure 6: Results on RealEstate (Zhou et al., 2018). We visualize loop closure consistency over a full camera rotation. The visual similarity between the first and last frames serves as a qualitative indicator of 3D spatial consistency. + +![](images/ddc2586f3345e661f33248b6cc0087c4703090e0712fed894a0128507250e4b5.jpg) + +Table 4: Evaluation on RealEstate10K + +

| Methods | PSNR ↑ | LPIPS ↓ | rFID ↓ |
| --- | --- | --- | --- |
| CameraCtrl (He et al., 2024) | 13.19 | 0.3328 | 133.81 |
| TrajAttn (Xiao et al., 2024) | 14.22 | 0.3698 | 128.36 |
| Viewcrafter (Yu et al., 2024c) | 21.72 | 0.1729 | 58.43 |
| DFoT (Song et al., 2025) | 16.42 | 0.2933 | 110.34 |
| Ours | 23.34 | 0.1672 | 43.14 |
+ +learning rate of $2 \times 10^{-5}$ . Training is conducted at a resolution of $640 \times 360$ , where frames are first encoded into a latent space via a VAE at a resolution of $32 \times 18$ , then further patchified to $16 \times 9$ . Our training dataset comprises approximately 12K long videos, each containing 1500 frames, generated from Fan et al. (2022). During training, we employ an 8-frame temporal context window alongside an 8-frame memory window. The model is trained for approximately 500K steps using 4 GPUs, with a batch size of 4 per GPU. For the hyperparameters specified in Algorithm 1 of the main paper, we set the similarity threshold $tr$ to 0.9, $w_{o}$ to 1, and $w_{t}$ to $0.2 / t_{c}$ . For the noise levels in Eq. (5) and Eq. (6), we set $k_{\min}$ to 15 and $k_{\max}$ to 1000. + +For our experiments on RealEstate10K (Zhou et al., 2018), we adopt DFoT (Song et al., 2025) as the base model. The RealEstate10K dataset provides a training set of approximately 65K short video clips. Training is conducted at a resolution of $256 \times 256$ , with frames patched to $128 \times 128$ . The model is trained for approximately 50K steps using 4 GPUs, with a batch size of 8 per GPU. + +# 4.1 Results on Generation Benchmark + +Comparisons on Minecraft Benchmark. We compare our approach with a standard full-sequence (Full Seq.) training method (He et al., 2024; Wang et al., 2024) and Diffusion Forcing (DF) (Chen et al., 2025). The key differences are as follows: the full-sequence conditional diffusion transformer (Peebles and Xie, 2023) maintains the same noise level during training and inference, DF introduces different noise levels for training and inference, and our method incorporates a memory mechanism. To assess both short-term and long-term world consistency, we conduct evaluations within and beyond the context window. We evaluate both settings on 300 test videos. In the following experiments, the agent's poses are generated by the game simulator as ground truth. However, in real-world scenarios, only the action input is available, and the pose is not directly observable. In such cases, the next-frame pose can be predicted based on the previous scenes, past states, and the upcoming action. We explore this design choice in the supplementary material. + +Within context window. For this experiment, all methods use a context window of 16, while our approach additionally maintains a memory window of 8. We test on customized motion scenarios (e.g., turn left, then turn right or move forward, then backward) to assess self-contained consistency, where the ground truth consists of previously generated frames at the same positions. As shown in Table 1 and Figure 4, the full-sequence baseline suffers from inconsistencies even within its own context window. DF improves consistency by enabling greater information exchange among generated frames. Our memory-based approach achieves the best performance, demonstrating the effectiveness of integrating a dedicated memory mechanism. + +Table 5: Ablation on sampling strategy for training + +

| Sampling strategy | PSNR ↑ | LPIPS ↓ | rFID ↓ |
| --- | --- | --- | --- |
| Small-range | 13.23 | 0.3786 | 46.55 |
| Large-range | 15.11 | 0.3855 | 42.96 |
| Progressive | 19.32 | 0.1429 | 15.37 |
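
For reference, the temporal attention mask of Eq. (6) takes only a few lines to construct. The sketch below uses 0-based indexing, so memory positions are $i < L_M$; it reproduces the two cases of the equation (memory frames attend only to themselves, context frames attend causally to everything before them).

```python
import torch


def temporal_mask(l_m: int, total: int) -> torch.Tensor:
    """Eq. (6): 1 = attend, 0 = masked, over `total` temporal positions."""
    i = torch.arange(total).unsqueeze(1)  # query index
    j = torch.arange(total).unsqueeze(0)  # key index
    mem_rows = (i < l_m) & (j == i)       # memory frames: diagonal only
    ctx_rows = (i >= l_m) & (j <= i)      # context frames: causal
    return (mem_rows | ctx_rows).float()


# 8 memory frames followed by an 8-frame context window -> (16, 16) mask.
print(temporal_mask(8, 16))
```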
+ +Beyond context window. In this setting, all methods use a context window of 8 and generate 100 future frames; our method further employs a memory window of 8 while initializing a 600-frame memory bank. We compute the reconstruction error using the subsequent 100 ground truth frames after 600 frames. Full-sequence methods can not roll out that long so we exclude it. DF exhibits poor PSNR and LPIPS scores, indicating severe inconsistency with the ground truth beyond the context window. Additionally, its low rFID suggests notable quality degradation. In contrast, our memory-augmented approach consistently outperforms others across all metrics, demonstrating superior long-term consistency and quality preservation. Figure 5 further substantiates these findings. + +Figure 3 showcases WORLDMEM's capabilities. The top section demonstrates its ability to operate in a free action space across diverse environments. Given a 600-frame memory bank, our model generates 100 future frames while preserving the ground truth's actions and poses, ensuring strong world consistency. The bottom section highlights dynamic environment interaction. By using timestamps as embeddings, the model remembers environmental changes and captures natural event evolution, such as plant growth over time. + +Comparisons on Real Scenarios. We compare our method with prior works (He et al., 2024; Xiao et al., 2024; Yu et al., 2024c; Song et al., 2025) on the RealEstate10K dataset (Zhou et al., 2018). We design 5 evaluation trajectories, each starting and ending at the same pose, across 100 scenes. The trajectory lengths range from 37 to 60 frames – exceeding the training lengths of all baselines (maximum 25 frames). + +CameraCtrl (He et al., 2024), TrajAttn (Xiao et al., 2024), and DFoT (Song et al., 2025) discard past frames and suffer from inconsistency. Viewcrafter (Yu et al., 2024c) incorporates explicit 3D reconstruction, yielding better results, but is constrained by errors in post-processing such as reconstruction and rendering. As shown in Table 4 and Figure 6, our approach achieves superior performance across all metrics. However, the RealEstate dataset inherently limits the full potential of our method, as it consists of short, non-interactive clips with limited temporal complexity. We leave evaluation under more challenging and interactive real-world scenarios for future work. + +# 4.2 Ablation + +**Embedding designs.** The design of embeddings within the memory block is crucial for cross-frame relationship modeling. We evaluate three strategies (Table 2): (1) sparse pose embedding with absolute encoding, (2) dense pose embedding with absolute encoding, and (3) dense pose embedding with relative encoding. Results show that dense pose embeddings (Plücker embedding) significantly enhance all metrics, emphasizing the benefits of richer pose representations. Switching from absolute to relative encoding further improves performance, particularly in LPIPS and rFID, by facilitating relationship reasoning and information retrieval. As illustrated in Figure 7, absolute embeddings accumulate errors over time, while relative embeddings maintain stability even beyond 300 frames. + +Sampling strategy for training. We compare different sampling strategies during training in the Minecraft benchmark. Small-range sampling restricts memory conditioning to frames within $2\mathrm{m}$ in the Minecraft world, while large-range sampling extends this range to $8\mathrm{m}$ . 
Progressive sampling, on the other hand, begins with small-range samples for the initial training steps and then gradually expands to large-range samples.

As shown in Table 5, both small-range and large-range sampling struggle with consistency and quality, whereas progressive sampling significantly improves all metrics. This suggests that gradually increasing the difficulty during training helps the model learn to reason over and effectively query information from the memory blocks.

![](images/c891de3c6e4cfb733593f322c26a2b2245c57e9ed2ba6ef2a8161dce2e9e97c1.jpg)
Figure 7: Long-term Generation Comparison. This figure presents the PSNR of different ablation methods compared to the ground truth over a 300-frame sequence. The results show that our method without memory blocks or with random memory retrieval exhibits immediate inconsistencies with the ground truth. Additionally, the model lacking relative embeddings begins to degrade significantly beyond 100 frames. In contrast, our full method maintains strong consistency even beyond 300 frames.

![](images/6d83deba8dc1fb557d72f7e206ad8763aaf1db95ce734260003174404ea4cd47.jpg)
Figure 8: Results w/o and w/ time condition. Without timestamps, the model fails to differentiate memory units from the same location at different times, causing errors. With time conditioning, it aligns with the updated world state, ensuring consistency.

Table 6: Ablation on time condition

| Time condition | PSNR ↑ | LPIPS ↓ | rFID ↓ |
| --- | --- | --- | --- |
| w/o | 17.17 | 0.1989 | 23.89 |
| w/ | 19.12 | 0.1613 | 16.53 |

Time condition. We ablate the effectiveness of the timestamp condition (for both embedding and retrieval) in Table 6. We curate 100 video samples featuring placing events and evaluate whether future generations align with event progression. As shown in the table, incorporating the time
+ +condition significantly improves PSNR and LPIPS, indicating that adding temporal information helps the model faithfully reproduce event changes in world simulation. Since events like plant growth are inherently unpredictable, we do not conduct quantitative evaluations on such cases but instead provide qualitative illustrations in Figure 8. + +Memory retrieve strategy. We analyze memory retrieval strategies in Table 3. Random sampling from the memory bank leads to poor performance and severe quality degradation, as evidenced by a sharp drop in rFID and rapid divergence from the ground truth (Figure 7). The confidence-based filtering significantly enhances consistency and generation quality. Additionally, we refine retrieval by filtering out redundant memory units based on similarity, further improving all evaluation metrics and demonstrating the effectiveness of our approach. + +# 5 Limitations and Future works + +Despite the effectiveness of our approach, certain issues warrant further exploration. First, we cannot guarantee that we can always retrieve all necessary information from the memory bank In some corner cases (e.g., when views are blocked by obstacles), relying solely on view overlap may be insufficient. Second, our current interaction with the environment lacks diversity and realism. In future work, we plan to extend our models to real-world scenarios with more realistic and varied interactions. Lastly, our memory design still entails linearly increasing memory usage, which may impose limitations when handling extremely long sequences. + +# 6 Conclusion + +In conclusion, WOrLDMEM tackles the longstanding challenge of maintaining long-term consistency in world simulation by employing a memory bank of past frames and associated states. Its memory attention mechanism enables accurate reconstruction of previously observed scenes, even under large viewpoints or temporal gaps, and effectively models dynamic changes over time. Extensive experiments in both virtual and real settings confirm WOrLDMEM's capacity for robust, immersive world simulation. We hope our work will encourage further research on the design and applications of memory-based world simulators. + +Acknowledgements. This research is supported by the National Research Foundation, Singapore, under its NRF Fellowship Award . This research is also supported by NTU SUG-NAP, as well as cash and in-kind funding from NTU S-Lab and industry partner(s). + +# References + +Eloi Alonso, Adam Jelley, Vincent Micheli, Anssi Kanervisto, Amos J Storkey, Tim Pearce, and François Fleuret. Diffusion for world modeling: Visual details matter in atari. Advances in Neural Information Processing Systems, 37:58757-58791, 2025. +Amir Bar, Gaoyue Zhou, Danny Tran, Trevor Darrell, and Yann LeCun. Navigation world models, 2024. +Charles Beattie, Joel Z Leibo, Denis Teplyashin, Tom Ward, Marcus Wainwright, Heinrich Kuttler, Andrew Lefrancq, Simon Green, Víctor Valdés, Amir Sadik, et al. Deepmind lab. arXiv preprint arXiv:1612.03801, 2016. +Boyuan Chen, Diego Martí Monsó, Yilun Du, Max Simchowitz, Russ Tedrake, and Vincent Sitzmann. Diffusion forcing: Next-token prediction meets full-sequence diffusion. Advances in Neural Information Processing Systems, 37:24081-24125, 2025. +Haoxin Chen, Menghan Xia, Yingqing He, Yong Zhang, Xiaodong Cun, Shaoshu Yang, Jinbo Xing, Yaofang Liu, Qifeng Chen, Xintao Wang, et al. Videocraft1: Open diffusion models for high-quality video generation. arXiv preprint arXiv:2310.19512, 2023. 
+Decart, Julian Quevedo, Quinn McIntyre, Spruce Campbell, Xinlei Chen, and Robert Wachen. Oasis: A universe in a transformer. 2024. Project website. +Linxi Fan, Guanzhi Wang, Yunfan Jiang, Ajay Mandlekar, Yuncong Yang, Haoyi Zhu, Andrew Tang, DeAn Huang, Yuke Zhu, and Anima Anandkumar. Minedojo: Building open-ended embodied agents with internet-scale knowledge. Advances in Neural Information Processing Systems, 35:18343-18362, 2022. +Ruili Feng, Han Zhang, Zhantao Yang, Jie Xiao, Zhilei Shu, Zhiheng Liu, Andy Zheng, Yukun Huang, Yu Liu, and Hongyang Zhang. The matrix: Infinite-horizon world generation with real-time moving control. arXiv preprint arXiv:2412.03568, 2024. +Ruiqi Gao, Aleksander Holynski, Philipp Henzler, Arthur Brussee, Ricardo Martin-Brualla, Pratul Srinivasan, Jonathan T Barron, and Ben Poole. Cat3d: Create anything in 3d with multi-view diffusion models. arXiv preprint arXiv:2405.10314, 2024. +Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725, 2023. +David Ha and Jürgen Schmidhuber. Recurrent world models facilitate policy evolution. Advances in neural information processing systems, 31, 2018a. +David Ha and Jürgen Schmidhuber. World models. arXiv preprint arXiv:1803.10122, 2018b. +Danijar Hafner, Timothy Lillicrap, Jimmy Ba, and Mohammad Norouzi. Dream to control: Learning behaviors by latent imagination. arXiv preprint arXiv:1912.01603, 2019. +Danijar Hafner, Timothy Lillicrap, Mohammad Norouzi, and Jimmy Ba. Mastering atari with discrete world models. arXiv preprint arXiv:2010.02193, 2020. +Hao He, Yinghao Xu, Yuwei Guo, Gordon Wetzstein, Bo Dai, Hongsheng Li, and Ceyuan Yang. Cameractrol: Enabling camera control for text-to-video generation. arXiv preprint arXiv:2404.02101, 2024. +Roberto Henschel, Levon Khachatryan, Daniil Hayrapetyan, Hayk Poghosyan, Vahram Tadevosyan, Zhangyang Wang, Shant Navasardyan, and Humphrey Shi. Streamingt2v: Consistent, dynamic, and extendable long video generation from text. arXiv preprint arXiv:2403.14773, 2024. +Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017. +Yining Hong, Beide Liu, Maxine Wu, Yuanhao Zhai, Kai-Wei Chang, Linjie Li, Kevin Lin, Chung-Ching Lin, Jianfeng Wang, Zhengyuan Yang, Ying Nian Wu, and Lijuan Wang Wang. Slowfast-vgen: Slow-fast learning for action-driven long video generation. arXiv preprint arXiv:2410.23277, 2024. +Anthony Hu, Lloyd Russell, Hudson Yeo, Zak Murez, George Fedoseev, Alex Kendall, Jamie Shotton, and Gianluca Corrado. Gaia-1: A generative world model for autonomous driving. arXiv preprint arXiv:2309.17080, 2023. + +Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuzhhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. *ICLR*, 1(2):3, 2022. +Hanwen Jiang, Hao Tan, Peng Wang, Haian Jin, Yue Zhao, Sai Bi, Kai Zhang, Fujun Luan, Kalyan Sunkavalli, Qixing Huang, et al. Rayzer: A self-supervised large view synthesis model. arXiv preprint arXiv:2505.00702, 2025. +Yang Jin, Zhicheng Sun, Ningyuan Li, Kun Xu, Hao Jiang, Nan Zhuang, Quzhe Huang, Yang Song, Yadong Mu, and Zhouchen Lin. Pyramidal flow matching for efficient video generative modeling. 
arXiv preprint arXiv:2410.05954, 2024. +Jihwan Kim, Junoh Kang, Jinyoung Choi, and Bohyung Han. FIFO-diffusion: Generating infinite videos from text without training. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. +Dan Kondratyuk, Lijun Yu, Xiuye Gu, José Lezama, Jonathan Huang, Grant Schindler, Rachel Hornung, Vighnesh Birodkar, Jimmy Yan, Ming-Chang Chiu, Krishna Somandepalli, Hassan Akbari, Yair Alon, Yong Cheng, Josh Dillon, Agrim Gupta, Meera Hahn, Anja Hauth, David Hendon, Alonso Martinez, David Minnen, Mikhail Sirotenko, Kihyuk Sohn, Xuan Yang, Hartwig Adam, Ming-Hsuan Yang, Irfan Essa, Huisheng Wang, David A. Ross, Bryan Seybold, and Lu Jiang. Videopoet: A large language model for zero-shot video generation, 2024. +Hanwen Liang, Junli Cao, Vidit Goel, Guocheng Qian, Sergei Korolev, Demetri Terzopoulos, Konstantinos N Plataniotis, Sergey Tulyakov, and Jian Ren. Wonderland: Navigating 3d scenes from a single image. arXiv preprint arXiv:2412.12091, 2024. +Fangfu Liu, Wenqiang Sun, Hanyang Wang, Yikai Wang, Haowen Sun, Junliang Ye, Jun Zhang, and Yueqi Duan. Reconx: Reconstruct any scene from sparse views with video diffusion model. arXiv preprint arXiv:2408.16767, 2024. +Xin Ma, Yaohui Wang, Gengyun Jia, Xinyuan Chen, Ziwei Liu, Yuan-Fang Li, Cunjian Chen, and Yu Qiao. Latte: Latent diffusion transformer for video generation. arXiv preprint arXiv:2401.03048, 2024. +OpenAI. Video generation models as world simulators. https://openai.com/research/video-generation-models-as-world-simulators, 2024. +Jack Parker-Holder, Philip Ball, Jake Bruce, Vibhavari Dasagi, Kristian Holsheimer, Christos Kaplanis, Alexandre Moufarek, Guy Scully, Jeremy Shar, Jimmy Shi, Stephen Spencer, Jessica Yung, Michael Dennis, Sultan Kenjeyev, Shangbang Long, Vlad Mnih, Harris Chan, Maxime Gazeau, Bonnie Li, Fabio Pardo, Luyu Wang, Lei Zhang, Frederic Besse, Tim Harley, Anna Mitenkova, Jane Wang, Jeff Clune, Demis Hassabis, Raia Hadsell, Adrian Bolton, Satinder Singh, and Tim Roktaschel. Genie 2: A large-scale foundation world model. 2024. +William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4195-4205, 2023. +Tanzila Rahman, Hsin-Ying Lee, Jian Ren, Sergey Tulyakov, Shweta Mahajan, and Leonid Sigal. Make-a-story: Visual memory conditioned consistent story generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2493-2502, 2023. +Xuanchi Ren, Tianchang Shen, Jiahui Huang, Huan Ling, Yifan Lu, Merlin Nimier-David, Thomas Müller, Alexander Keller, Sanja Fidler, and Jun Gao. Gen3c: 3d-informed world-consistent video generation with precise camera control. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2025. +Mehdi SM Sajjadi, Henning Meyer, Etienne Pot, Urs Bergmann, Klaus Greff, Noha Radwan, Suhani Vora, Mario Lucic, Daniel Duckworth, Alexey Dosovitskiy, et al. Scene representation transformer: Geometry-free novel view synthesis through set-latent scene representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6229-6238, 2022. +Vincent Sitzmann, Semon Rezchikov, Bill Freeman, Josh Tenenbaum, and Fredo Durand. Light field networks: Neural scene representations with single-evaluation rendering. Advances in Neural Information Processing Systems, 34:19313-19325, 2021. 
+Kiwhan Song, Boyuan Chen, Max Simchowitz, Yilun Du, Russ Tedrake, and Vincent Sitzmann. History-guided video diffusion. arXiv preprint arXiv:2502.06764, 2025. + +Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456, 2020. +Dani Valevski, Yaniv Leviathan, Moab Arar, and Shlomi Fruchter. Diffusion models are real-time game engines. arXiv preprint arXiv:2408.14837, 2024. +Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need, 2017. +Hengyi Wang and Lourdes Agapito. 3d reconstruction with spatial memory. arXiv preprint arXiv:2408.16061, 2024. +Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, and Shiwei Zhang. Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571, 2023a. +Jing Wang, Fengzhuo Zhang, Xiaoli Li, Vincent YF Tan, Tianyu Pang, Chao Du, Aixin Sun, and Zhuoran Yang. Error analyses of auto-regressive video diffusion models: A unified framework. arXiv preprint arXiv:2503.10704, 2025. +Yaohui Wang, Xinyuan Chen, Xin Ma, Shangchen Zhou, Ziqi Huang, Yi Wang, Ceyuan Yang, Yinan He, Jiashuo Yu, Peiqing Yang, et al. Lavie: High-quality video generation with cascaded latent diffusion models. arXiv preprint arXiv:2309.15103, 2023b. +Zhouxia Wang, Ziyang Yuan, Xintao Wang, Yaowei Li, Tianshui Chen, Menghan Xia, Ping Luo, and Ying Shan. Motionctrl: A unified and flexible motion controller for video generation. In ACM SIGGRAPH 2024 Conference Papers, pages 1-11, 2024. +Sibo Wu, Congrong Xu, Binbin Huang, Andreas Geiger, and Anpei Chen. Genfusion: Closing the loop between reconstruction and generation via videos. arXiv preprint arXiv:2503.21219, 2025a. +Tong Wu, Zhihao Fan, Xiao Liu, Yeyun Gong, Yelong Shen, Jian Jiao, Hai-Tao Zheng, Juntao Li, Zhongyu Wei, Jian Guo, Nan Duan, and Weizhu Chen. Ar-diffusion: Auto-regressive diffusion model for text generation, 2023. +Xindi Wu, Uriel Singer, Zhaojiang Lin, Andrea Madotto, Xide Xia, Yifan Xu, Paul Crook, Xin Luna Dong, and Seungwhan Moon. Corgi: Cached memory guided video generation. In 2025 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 4585-4594. IEEE, 2025b. +Zeqi Xiao, Wenqi Ouyang, Yifan Zhou, Shuai Yang, Lei Yang, Jianlou Si, and Xingang Pan. Trajectory attention for fine-grained video motion control. arXiv preprint arXiv:2411.19324, 2024. +Jingjing Xu, Xu Sun, Zhiyuan Zhang, Guangxiang Zhao, and Junyang Lin. Understanding and improving layer normalization. Advances in neural information processing systems, 32, 2019. +Mengjiao Yang, Yilun Du, Kamyar Ghasemipour, Jonathan Tompson, Dale Schuurmans, and Pieter Abbeel. Learning interactive real-world simulators. arXiv preprint arXiv:2310.06114, 1(2):6, 2023. +Tianwei Yin, Qiang Zhang, Richard Zhang, William T Freeman, Fredo Durand, Eli Shechtman, and Xun Huang. From slow bidirectional to fast causal video generators. arXiv preprint arXiv:2412.07772, 2024. +Hong-Xing Yu, Haoyi Duan, Charles Herrmann, William T Freeman, and Jiajun Wu. Wonderworld: Interactive 3d scene generation from a single image. arXiv preprint arXiv:2406.09394, 2024a. +Hong-Xing Yu, Haoyi Duan, Junhwa Hur, Kyle Sargent, Michael Rubinstein, William T Freeman, Forrester Cole, Deqing Sun, Noah Snavely, Jiajun Wu, et al. Wonderjourney: Going from anywhere to everywhere. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6658-6667, 2024b. +Jiwen Yu, Yiran Qin, Haoxuan Che, Quande Liu, Xintao Wang, Pengfei Wan, Di Zhang, Kun Gai, Hao Chen, and Xihui Liu. A survey of interactive generative video. arXiv preprint arXiv:2504.21853, 2025a. +Jiwen Yu, Yiran Qin, Haoxuan Che, Quande Liu, Xintao Wang, Pengfei Wan, Di Zhang, and Xihui Liu. Position: Interactive generative video as next-generation game engine. arXiv preprint arXiv:2503.17359, 2025b. +Jiwen Yu, Yiran Qin, Xintao Wang, Pengfei Wan, Di Zhang, and Xihui Liu. Gamefactory: Creating new games with generative interactive videos. arXiv preprint arXiv:2501.08325, 2025c. + +Wangbo Yu, Jinbo Xing, Li Yuan, Wenbo Hu, Xiaoyu Li, Zhipeng Huang, Xiangjun Gao, Tien-Tsin Wong, Ying Shan, and Yonghong Tian. Viewcrafter: Taming video diffusion models for high-fidelity novel view synthesis. arXiv preprint arXiv:2409.02048, 2024c. +Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. +Longtao Zheng, Yifan Zhang, Hanzhong Guo, Jiachun Pan, Zhenxiong Tan, Jiahao Lu, Chuanxin Tang, Bo An, and Shuicheng Yan. Memo: Memory-guided diffusion for expressive talking video generation. arXiv preprint arXiv:2412.04448, 2024a. +Zangwei Zheng, Xiangyu Peng, Tianji Yang, Chenhui Shen, Shenggui Li, Hongxin Liu, Yukun Zhou, Tianyi Li, and Yang You. Open-sora: Democratizing efficient video production for all, 2024b. +Tinghui Zhou, Richard Tucker, John Flynn, Graham Fyffe, and Noah Snavely. Stereo magnification: Learning view synthesis using multiplane images. In SIGGRAPH, 2018. + +# 7 Supplementary Materials + +# 7.1 Details and Experiments + +**Embedding designs.** We present the detailed designs of embeddings for timesteps, actions, poses, and timestamps in Figure 10, where $F, C, H, W, A$ denote the frame number, channel count, height, width, and action count, respectively. + +The input pose is parameterized by position $(x,z,y)$ and orientation (pitch $\theta$ and yaw $\phi$ ). The extrinsic matrix $\mathbf{T} \in \mathbb{R}^{4 \times 4}$ is formed as: + +$$ +\mathbf {T} = \left[ \begin{array}{l l} \mathbf {R} _ {c} & \mathbf {c} \\ \mathbf {0} ^ {T} & 1 \end{array} \right], \tag {7} +$$ + +where $\mathbf{c} = (x,z,y)^T$ and $\mathbf{R}_c = \mathbf{R}_y(\phi)\mathbf{R}_x(\theta)$ + +To encode camera pose, we adopt the Plücker embedding. Given a pixel $(u,v)$ with normalized camera coordinates: + +$$ +\boldsymbol {\pi} _ {u v} = \mathbf {K} ^ {- 1} [ u, v, 1 ] ^ {T}, \tag {8} +$$ + +its world direction is: + +$$ +\mathbf {d} _ {u v} = \mathbf {R} _ {c} \boldsymbol {\pi} _ {u v} + \mathbf {c}. \tag {9} +$$ + +The Plücker embedding is: + +$$ +\mathbf {l} _ {u v} = \left(\mathbf {c} \times \mathbf {d} _ {u v}, \mathbf {d} _ {u v}\right) \in \mathbb {R} ^ {6}. \tag {10} +$$ + +For a frame of size $H \times W$ , the full embedding is: + +$$ +\mathbf {L} _ {i} \in \mathbb {R} ^ {H \times W \times 6}. \tag {11} +$$ + +Memory context length. We evaluate how different memory context lengths affect performance in the Minecraft benchmark. Table 7 shows that increasing the context length from 1 to 8 steadily boosts PSNR, lowers LPIPS, and reduces rFID. However, extending the length to 16 deteriorates results, indicating that excessive memory frames may introduce noise or reduce retrieval precision. 
A context length of 8 provides the best trade-off, yielding the highest PSNR and the lowest LPIPS and rFID. + +Pose prediction. For interactive play, ground truth poses are not accessible. To address this, we designed a lightweight pose prediction module that estimates the pose of the next frame. As illustrated in Figure 9, the predictor takes the previous image, the previous pose, and the upcoming action as inputs and outputs the predicted next pose. This module enables the system to operate using actions alone, eliminating the need for ground truth poses during inference. In Table 8, we compare the performance of using predicted poses versus ground truth poses. While using ground truth poses yields better results across all metrics, the performance drop with predicted poses is acceptable. This is because our method does not rely heavily on precise pose predictions – new frames are generated based on these predictions – and the ground truth poses generated by the Minecraft simulator also contain a certain degree of randomness. + +Table 7: Ablation on length of memory context length + +
+
+Table 7: Ablation on memory context length
+
+| Length | PSNR ↑ | LPIPS ↓ | rFID ↓ |
+| --- | --- | --- | --- |
+| 1 | 16.18 | 0.1899 | 20.47 |
+| 4 | 18.68 | 0.1568 | 16.54 |
+| 8 | 19.32 | 0.1429 | 15.37 |
+| 16 | 17.14 | 0.1687 | 18.33 |
+
+Table 8: Comparison between using predicted poses and ground truth poses
+
+| Pose Type | PSNR ↑ | LPIPS ↓ | rFID ↓ |
+| --- | --- | --- | --- |
+| Ground truth | 19.32 | 0.1429 | 15.37 |
+| Predicted | 17.13 | 0.1786 | 20.36 |
+
+![](images/43ccd54139ef24f20c1aefc610fed777c3dd8ace9ca8755f9903a916ced4749f.jpg)
+Figure 9: Structure of the pose predictor.
+
+![](images/33a6d94605ecfe71ff82a31473937beb82fd235cd731a85bcb700378ff2ddd3a.jpg)
+(a) Timestep embedding
+(b) Action embedding
+
+![](images/87369e340a68364c85e6e43c777c9d3474916f9a9513dff25ee3cb2472787016.jpg)
+(c) Pose embedding
+(d) Timestamp embedding
+Figure 10: Illustration of different embeddings.
+
+# 7.2 Memory Usage and Scalability Analysis
+
+To assess the scalability and practical feasibility of our method, we provide a detailed quantitative analysis covering memory usage, generation duration, training cost, and inference efficiency.
+
+**Memory Usage of the Memory Bank.** The memory bank is lightweight: storing 600 visual memory tokens with shape [600, 16, 18, 32] in float32 takes approximately 21 MB.
+
+**Retrieval Latency.** Below we report the average retrieval time (for 8 memory frames) as a function of memory bank size:
+
+![](images/54085ce17ba039df16122eec09ce0693f531d932564155b51c6ddc1fd60662ac.jpg)
+Figure 11: Two-view FOV overlapping visualization.
+
+| Number of Memory Candidates | Retrieval Time (s) |
+| --- | --- |
+| 10 | 0.04 |
+| 100 | 0.06 |
+| 600 | 0.10 |
+| 1000 | 0.16 |
+
+The generation cost (20 denoising steps) is $\sim 0.9$ s per frame. Retrieval time therefore accounts for only $10$–$20\%$ of total inference time, even with 1000 candidates.
+
+**Comparison with Baseline.** We compare our method with a baseline model (without memory) under consistent settings: 8 context frames, 8 memory frames, 20 denoising steps, and no acceleration techniques, on a single H200 GPU.
+
+| Method | Training Mem. Usage | Training Speed (it/s) | Inference Mem. Usage | Inference Speed (it/s) |
+| --- | --- | --- | --- | --- |
+| w/o Memory | 33 GB | 3.19 | 9 GB | 1.03 |
+| with Memory | 51 GB | 1.76 | 11 GB | 0.89 |
+
+Adding memory introduces moderate training overhead. During inference, the impact is minimal: only a small increase in memory usage and a slight decrease in speed.
+
+**Inference Optimization.** With modern acceleration techniques (e.g., timestep distillation, early exit, sparse attention), inference speed can reach $\sim 10$ FPS, making our method practical for deployment.
+
+**FOV Overlapping Computation.** We present the details of the Monte Carlo-based FOV overlapping computation in Algorithm 2, as well as the two-view overlapping sampling in Figure 11.
+
+# 7.3 Visualizations
+
+In this section, we provide more visualizations of different aspects to facilitate understanding.
+
+**Minecraft Training Examples.** We present a diverse set of training environments that include various terrain types, action spaces, and weather conditions, as shown in Figure 12. These variations help enhance the model's adaptability and robustness in different scenarios.
+
+**Trajectory Examples in Minecraft.** Figure 13 illustrates trajectory examples in the x-z space over 100 frames. The agent's movement follows a random action pattern, ensuring diverse learning objectives and a broad range of sampled experiences.
+
+**Pose Distribution.** We collect and visualize 800 samples within a sampling range of 8, as shown in Figure 14. The random pattern observed in Figure 14 ensures a diverse distribution of sampled poses in space, which is beneficial for learning the reasoning process within the memory blocks.
+
+# Algorithm 2: Monte Carlo-based FOV Overlap Computation (Notationally Disjoint)
+
+# Input:
+
+- $Q_{\mathrm{ref}} \in \mathbb{R}^{F \times 5}$: reference poses from the memory bank (x, y, z, pitch, yaw), where $F$ is the number of stored poses.
+- $Q_{\mathrm{tgt}} \in \mathbb{R}^{5}$: pose of the current (target) frame.
+- $M$: number of 3D sample points (default 10,000).
+- $R$: radius of the sampling sphere (default $30\,\mathrm{m}$).
+- $\phi_{h}$, $\phi_{v}$: horizontal/vertical field-of-view angles (in degrees).
+
+# Output:
+
+- $\boldsymbol{\rho} \in \mathbb{R}^{F}$: overlapping ratios between each reference pose and the target pose.
+
+# begin
+
+# $\triangle$ Step 1: Random Sampling in a Sphere
+
+Generate $M$ points $\mathbf{q}$ uniformly in a 3D sphere of radius $R$:
+
+$$
+\mathbf{q} \leftarrow \mathrm{PointSampling}(M, R).
+$$
+
+# $\triangle$ Step 2: Translate Points to $Q_{\mathrm{tgt}}$ as Center
+
+Let $Q_{\mathrm{tgt}}(x, y, z)$ be the 3D coordinates of the current camera pose. Shift all sampled points:
+
+$$
+\mathbf{q} \leftarrow \mathbf{q} + Q_{\mathrm{tgt}}(x, y, z).
+$$
+
+# $\triangle$ Step 3: FOV Checks
+
+Compute a boolean matrix $\mathbf{v}_{\mathrm{ref}} \in \{0,1\}^{F \times M}$, where each entry indicates whether a point in $\mathbf{q}$ lies in the FOV of a reference pose:
+
+$$
+\mathbf{v}_{\mathrm{ref}} \leftarrow \mathrm{IsInsideFOV}\big(\mathbf{q}, Q_{\mathrm{ref}}, \phi_{h}, \phi_{v}\big).
+$$
+
+Similarly, compute a boolean vector $\mathbf{v}_{\mathrm{tgt}} \in \{0,1\}^{M}$ for the target pose:
+
+$$
+\mathbf{v}_{\mathrm{tgt}} \leftarrow \mathrm{IsInsideFOV}\big(\mathbf{q}, Q_{\mathrm{tgt}}, \phi_{h}, \phi_{v}\big).
+$$
+
+# $\triangle$ Step 4: Overlapping Ratio Computation
+
+Obtain the final overlapping ratio vector $\boldsymbol{\rho} \in \mathbb{R}^{F}$ by combining $\mathbf{v}_{\mathrm{ref}}$ and $\mathbf{v}_{\mathrm{tgt}}$:
+
+$$
+\boldsymbol{\rho}[i] = \frac{1}{M} \sum_{j=1}^{M} \left(\mathbf{v}_{\mathrm{ref}}[i, j] \cdot \mathbf{v}_{\mathrm{tgt}}[j]\right),
+$$
+
+which measures the fraction of sampled points that are visible in both the $i$-th reference pose and the target pose.
+
+Return $\boldsymbol{\rho}$
+
+# end
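+
+The algorithm maps directly to a vectorized implementation. The following is a minimal NumPy sketch of ours (not the released code); it reuses the rotation convention of Eq. (7), assumes pitch/yaw are in radians with $+z$ as the camera's forward axis, and the default FOV angles are placeholders.
+
+```python
+import numpy as np
+
+def rotation_matrix(pitch, yaw):
+    """R_c = R_y(yaw) @ R_x(pitch), as in Eq. (7); angles in radians."""
+    cp, sp = np.cos(pitch), np.sin(pitch)
+    cy, sy = np.cos(yaw), np.sin(yaw)
+    R_x = np.array([[1, 0, 0], [0, cp, -sp], [0, sp, cp]])
+    R_y = np.array([[cy, 0, sy], [0, 1, 0], [-sy, 0, cy]])
+    return R_y @ R_x
+
+def is_inside_fov(q, pose, fov_h, fov_v):
+    """IsInsideFOV: boolean mask over the M points q (M, 3)."""
+    x, y, z, pitch, yaw = pose
+    p_cam = (q - np.array([x, y, z])) @ rotation_matrix(pitch, yaw)  # R_c^T (q - c)
+    fwd = p_cam[:, 2]                                  # +z assumed forward
+    ang_h = np.degrees(np.arctan2(p_cam[:, 0], fwd))
+    ang_v = np.degrees(np.arctan2(p_cam[:, 1], fwd))
+    return (fwd > 0) & (np.abs(ang_h) < fov_h / 2) & (np.abs(ang_v) < fov_v / 2)
+
+def fov_overlap(Q_ref, Q_tgt, M=10_000, R=30.0, fov_h=105.0, fov_v=75.0):
+    """Monte Carlo overlap ratios rho (F,), Steps 1-4 of Algorithm 2."""
+    # Step 1: uniform samples in a ball of radius R.
+    d = np.random.normal(size=(M, 3))
+    d /= np.linalg.norm(d, axis=1, keepdims=True)      # random directions
+    q = d * (R * np.random.uniform(size=(M, 1)) ** (1 / 3))
+    # Step 2: recenter on the target camera position.
+    q = q + np.asarray(Q_tgt[:3], dtype=float)
+    # Steps 3-4: FOV checks and overlap ratio.
+    v_tgt = is_inside_fov(q, Q_tgt, fov_h, fov_v)                         # (M,)
+    v_ref = np.stack([is_inside_fov(q, p, fov_h, fov_v) for p in Q_ref])  # (F, M)
+    return (v_ref & v_tgt).mean(axis=1)                                   # rho
+```
+
+These overlap scores, together with temporal proximity, are what drive the confidence-based selection of the memory frames used as context.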
+
+**More Qualitative Results.** For additional qualitative examples, we recommend consulting the attached web page, which offers enhanced visualizations.
+
+![](images/7260ec179d4330f3a596be59f60ebb624909fec6a3bdbace805bf1f660641908.jpg)
+
+![](images/9e7db2f2124ac20efa2edca5ec9e00b0b6d99dcd36451b763e6533c66aad42f2.jpg)
+
+![](images/9e73712af021410a3cc6091b7f271192d0761ef5e0bbac919792a0ed2bc5e942.jpg)
+
+![](images/b4112e672e093bd908e0d8b47f478e0720181eb791bfc4170bf71a494e2cad04.jpg)
+
+![](images/82b66f7e4a39cf80e04885bbb128c8ee9424241e8ae642b1dd992428acd71103.jpg)
+
+![](images/b2fb6711c620d4ecd7ae763026c5f9b183d24737649a7f6c09253b2753625d9a.jpg)
+
+![](images/9208587645d921b10c68d516401d5b030a2fcaee02b18f07e376e379a667fee0.jpg)
+
+![](images/1554543ce78d3b089e0c1e1756fb0bc850201acde2f34528a5b9ac40bfc5306d.jpg)
+
+![](images/13ddfe8d2bb188aff88dfc8e860cf494addddb6a22e5b2f076a89b7033c0e483.jpg)
+
+![](images/e45872d86a46e6fd48f3db8a029253a71ef4ec766b935af977cd99b1b0592ac2.jpg)
+
+![](images/6570f481c382fe021ed13888148b604c0b603bb4d891f79cec1de0d0a488be05.jpg)
+
+![](images/c559b904ce54ed8b960ef64d960db2b2626c58caca3d6c512ff9ee87f3d438a7.jpg)
+
+![](images/e7a0d754531749230bbd4a9a05832bffb34d64393251380876fd18cad297f056.jpg)
+
+![](images/e21770768b7f5933c8a6579cb05bb882d2db0cd3a528b963e76b28e85fc3f88e.jpg)
+
+![](images/3dae4109068741e9358f878bbc3dc4031d853cea0cc5c6c8a1494fcebea62548.jpg)
+
+![](images/56325b6b2dbc279e3cdfcac989057aba84971aad1af70291e761a6af0e60f513.jpg)
+
+![](images/d885c7d407ce44ff8633d17891065c18fb770ac9a2dbdb8904c89b0abb280874.jpg)
+Figure 12: Training Examples. Our training environments encompass diverse terrains, action spaces, and weather conditions, providing a comprehensive setting for learning.
+
+![](images/9759da0bae6bff88da79c18c7517e84bdbc403c95500c5810822ec675e10eb60.jpg)
+
+![](images/ef99e53069789de1c72b40dec9daf83482c8b1d58b900b04b7b673a7536cdbeb.jpg)
+
+![](images/b7b3bfbd5ce5428451351fb16ec099c6e94af83843c985d33a1808711993472e.jpg)
+
+![](images/f69aaeecb41bae0b361ddaf6325948bcdd310d6207adc846da6a8605dba8f003.jpg)
+Figure 13: Visualization of Trajectory Examples in the X-Z Space. The axis scales represent distances within the Minecraft environment.
+
+![](images/57ab4223a27e3897885abbdfe8c890272849a9e1349679df129a8d7cc0014606.jpg)
+Figure 14: Visualization of Relative Pose Distribution for Training in X-Z Space. Red dots indicate positions, while yellow arrows represent directions.
\ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12369/images/051a668f07afe27adca49a42fba69f683663d42d43addef7ff5276c78d55d7e8.jpg b/data/2025/2504_12xxx/2504.12369/images/051a668f07afe27adca49a42fba69f683663d42d43addef7ff5276c78d55d7e8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5c6a634674b242a3b016efa35bdd2eb72934cbd9 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/051a668f07afe27adca49a42fba69f683663d42d43addef7ff5276c78d55d7e8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a0c2c12db978c7fe6259daf1fb06fb0b7fe21d55f9197cd26ac6f3d11d1ff60 +size 8864 diff --git a/data/2025/2504_12xxx/2504.12369/images/0eaba4c9b0918d5cb17309e5aac57ca03240e9a5a335a15ff79f2279e7e8be2c.jpg b/data/2025/2504_12xxx/2504.12369/images/0eaba4c9b0918d5cb17309e5aac57ca03240e9a5a335a15ff79f2279e7e8be2c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..28d8625af09bd3ddeff803e9753d1f6efedd7916 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/0eaba4c9b0918d5cb17309e5aac57ca03240e9a5a335a15ff79f2279e7e8be2c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50e567e8cacd4fbd30cb79a6714d48bb410c685b85e6a75b8d1cedc387228763 +size 31756 diff --git a/data/2025/2504_12xxx/2504.12369/images/128fabee19e5abaad9587da0de6cd970dc5cc8944b3b1196aad6e7166dc04fe7.jpg b/data/2025/2504_12xxx/2504.12369/images/128fabee19e5abaad9587da0de6cd970dc5cc8944b3b1196aad6e7166dc04fe7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b7ffb8be5a82b4b1822abb9b6a7cb5ea1157b621 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/128fabee19e5abaad9587da0de6cd970dc5cc8944b3b1196aad6e7166dc04fe7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f9d4125cf80216d443167862bbade98c02d37a71d89ca93e697a24e5c281f2d +size 6208 diff --git a/data/2025/2504_12xxx/2504.12369/images/137374daba42cc29ee3827d4a155e71a28fcefdc5271bea03e2c5223a0b3ef72.jpg b/data/2025/2504_12xxx/2504.12369/images/137374daba42cc29ee3827d4a155e71a28fcefdc5271bea03e2c5223a0b3ef72.jpg new file mode 100644 index 0000000000000000000000000000000000000000..02a0276b8b854dde6dad90a6bb95da48a87b58b0 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/137374daba42cc29ee3827d4a155e71a28fcefdc5271bea03e2c5223a0b3ef72.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ae4a1ebe05ce4a35864f10b3ba43eaed93a41f8cf834f0aafefd1de4b1fa7d2 +size 3039 diff --git a/data/2025/2504_12xxx/2504.12369/images/13ddfe8d2bb188aff88dfc8e860cf494addddb6a22e5b2f076a89b7033c0e483.jpg b/data/2025/2504_12xxx/2504.12369/images/13ddfe8d2bb188aff88dfc8e860cf494addddb6a22e5b2f076a89b7033c0e483.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e46f650d1a3beaca667b9a0b8b4ad8cdca7733bf --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/13ddfe8d2bb188aff88dfc8e860cf494addddb6a22e5b2f076a89b7033c0e483.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f33cbd87baa12cbaf0c508a823fb9fc3da9e436438bbfe06e013403610ccb4a6 +size 8701 diff --git a/data/2025/2504_12xxx/2504.12369/images/1554543ce78d3b089e0c1e1756fb0bc850201acde2f34528a5b9ac40bfc5306d.jpg b/data/2025/2504_12xxx/2504.12369/images/1554543ce78d3b089e0c1e1756fb0bc850201acde2f34528a5b9ac40bfc5306d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..84454a8139039b2a45295e277506028d1552b2c2 --- /dev/null +++ 
b/data/2025/2504_12xxx/2504.12369/images/1554543ce78d3b089e0c1e1756fb0bc850201acde2f34528a5b9ac40bfc5306d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7693c1438099cf1195537b933db6b138f6a4323bffdcf7228d8b6bba9aad8f7e +size 7704 diff --git a/data/2025/2504_12xxx/2504.12369/images/17f283519eda9a5331b73da78c30e9f49bf3b0344d40c5194698866ef6a8043e.jpg b/data/2025/2504_12xxx/2504.12369/images/17f283519eda9a5331b73da78c30e9f49bf3b0344d40c5194698866ef6a8043e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e5b3aafce312ae85a46dd18496cda743414a3c9f --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/17f283519eda9a5331b73da78c30e9f49bf3b0344d40c5194698866ef6a8043e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a897aa5a34c11dc1490a16b46518663829277a81239528203f63dc3769a9cc61 +size 29578 diff --git a/data/2025/2504_12xxx/2504.12369/images/2cedaf771a3bc9c255e1950c8a7a8826919dba3fb6d4f8b211d37dc47c3d69f4.jpg b/data/2025/2504_12xxx/2504.12369/images/2cedaf771a3bc9c255e1950c8a7a8826919dba3fb6d4f8b211d37dc47c3d69f4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..95d56437d378374f4010599fc5a916d497f0f0dd --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/2cedaf771a3bc9c255e1950c8a7a8826919dba3fb6d4f8b211d37dc47c3d69f4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3dc276530879c82f290c3919bc5ba3255f197267e1c5e556096e8ecf93e7af15 +size 101049 diff --git a/data/2025/2504_12xxx/2504.12369/images/30d17f42dbaa4ca8d8b12815ea604146efbf347f4ae14367a292fdb24ea2af4b.jpg b/data/2025/2504_12xxx/2504.12369/images/30d17f42dbaa4ca8d8b12815ea604146efbf347f4ae14367a292fdb24ea2af4b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ca3710c5e96c1bd5990bf59f766a3645e0780706 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/30d17f42dbaa4ca8d8b12815ea604146efbf347f4ae14367a292fdb24ea2af4b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ccc0636b1421424a750e8e51ca7d7f6fb51a2d1b4cd9f625129c4e26cb8d7eeb +size 14484 diff --git a/data/2025/2504_12xxx/2504.12369/images/311186cc1d836831fefdca576808fd26c822e109d2ddfda303da4ad7c48f137b.jpg b/data/2025/2504_12xxx/2504.12369/images/311186cc1d836831fefdca576808fd26c822e109d2ddfda303da4ad7c48f137b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9218e1866a39fbd423240c4136beff122639cfed --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/311186cc1d836831fefdca576808fd26c822e109d2ddfda303da4ad7c48f137b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9137de795e46f0723457386843821292b594d0d948f8783697b531d31f7a8ba9 +size 16856 diff --git a/data/2025/2504_12xxx/2504.12369/images/33a6d94605ecfe71ff82a31473937beb82fd235cd731a85bcb700378ff2ddd3a.jpg b/data/2025/2504_12xxx/2504.12369/images/33a6d94605ecfe71ff82a31473937beb82fd235cd731a85bcb700378ff2ddd3a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4b78699d74643f63e89928d0e02b7f18ef3e0af1 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/33a6d94605ecfe71ff82a31473937beb82fd235cd731a85bcb700378ff2ddd3a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d4beaca4b34ba16af96423f8a2f335a70d3fd5de4fb6c20ab4b1b2ef17c79e0 +size 19049 diff --git a/data/2025/2504_12xxx/2504.12369/images/3549a7c6280ea8f465ef040bf31d25e38f052420b7d8d952435c0d665c12cb43.jpg 
b/data/2025/2504_12xxx/2504.12369/images/3549a7c6280ea8f465ef040bf31d25e38f052420b7d8d952435c0d665c12cb43.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4cafd4872ca77270524313f6880d00e1505b5f75 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/3549a7c6280ea8f465ef040bf31d25e38f052420b7d8d952435c0d665c12cb43.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7d0c2bb16c291f89d977f7c5d7cd5a9e554700b2ec538087e4eda9550104c5c +size 7628 diff --git a/data/2025/2504_12xxx/2504.12369/images/3dae4109068741e9358f878bbc3dc4031d853cea0cc5c6c8a1494fcebea62548.jpg b/data/2025/2504_12xxx/2504.12369/images/3dae4109068741e9358f878bbc3dc4031d853cea0cc5c6c8a1494fcebea62548.jpg new file mode 100644 index 0000000000000000000000000000000000000000..18d9de35964937af337b575d5131172a6290d542 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/3dae4109068741e9358f878bbc3dc4031d853cea0cc5c6c8a1494fcebea62548.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec51c7196a374f0dd3e6300c70c0e5c2ec4f1a263d0969bd1de449a2959e2b64 +size 6823 diff --git a/data/2025/2504_12xxx/2504.12369/images/43ccd54139ef24f20c1aefc610fed777c3dd8ace9ca8755f9903a916ced4749f.jpg b/data/2025/2504_12xxx/2504.12369/images/43ccd54139ef24f20c1aefc610fed777c3dd8ace9ca8755f9903a916ced4749f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9089edda1a18ac2e03e4aaba52eaaeb456c13be1 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/43ccd54139ef24f20c1aefc610fed777c3dd8ace9ca8755f9903a916ced4749f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e84bc4662f46f6e6117d8f51ebad162364aa9b24ea397a5d53d11760dbf3f82 +size 15079 diff --git a/data/2025/2504_12xxx/2504.12369/images/4cd14cfef9f9a857f1658afa482563f9f9aae5ff0fde6e994ca27c3ce2daf2f1.jpg b/data/2025/2504_12xxx/2504.12369/images/4cd14cfef9f9a857f1658afa482563f9f9aae5ff0fde6e994ca27c3ce2daf2f1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8675ad2f16ed9455e55addc792f4c47a3bce9abf --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/4cd14cfef9f9a857f1658afa482563f9f9aae5ff0fde6e994ca27c3ce2daf2f1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03d6a781be4120784a21a88111427afef66deba3a5d8a2eda4ea38c8d4cb6b5f +size 5672 diff --git a/data/2025/2504_12xxx/2504.12369/images/5019318d9bedd2a41eeab53f93ae9a8dc4075660cc387a33e5ac9e4fd4af8336.jpg b/data/2025/2504_12xxx/2504.12369/images/5019318d9bedd2a41eeab53f93ae9a8dc4075660cc387a33e5ac9e4fd4af8336.jpg new file mode 100644 index 0000000000000000000000000000000000000000..09e92b0c86dde7bfa62b88f1a6d8c8dc3f2f50ec --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/5019318d9bedd2a41eeab53f93ae9a8dc4075660cc387a33e5ac9e4fd4af8336.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e5ed5daadc5f4236edc97a7e9b9e66b482a926419275b922321b21addea5863 +size 3022 diff --git a/data/2025/2504_12xxx/2504.12369/images/54085ce17ba039df16122eec09ce0693f531d932564155b51c6ddc1fd60662ac.jpg b/data/2025/2504_12xxx/2504.12369/images/54085ce17ba039df16122eec09ce0693f531d932564155b51c6ddc1fd60662ac.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0cfb30e23e454f3a66a03dc7f028d0cd37e54b70 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/54085ce17ba039df16122eec09ce0693f531d932564155b51c6ddc1fd60662ac.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:8c6c8b332507437475cbf3728510516ab5a50ab2054ffe0e0085d842daedd8bf +size 38101 diff --git a/data/2025/2504_12xxx/2504.12369/images/55568ec2d9052a84d2f43f5fd983fa65c403765847b1eb321dd4a5371fac8f43.jpg b/data/2025/2504_12xxx/2504.12369/images/55568ec2d9052a84d2f43f5fd983fa65c403765847b1eb321dd4a5371fac8f43.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d83027c3bc233f6eb90e1d3766876fae5564749c --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/55568ec2d9052a84d2f43f5fd983fa65c403765847b1eb321dd4a5371fac8f43.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa0c3b9bb417311acd5c45d37778f5ab4eb154ed8b951400a0eefd90f553774c +size 26628 diff --git a/data/2025/2504_12xxx/2504.12369/images/56325b6b2dbc279e3cdfcac989057aba84971aad1af70291e761a6af0e60f513.jpg b/data/2025/2504_12xxx/2504.12369/images/56325b6b2dbc279e3cdfcac989057aba84971aad1af70291e761a6af0e60f513.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3136bac7501b44c263c28cdcd9cf9bcfe7f574b6 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/56325b6b2dbc279e3cdfcac989057aba84971aad1af70291e761a6af0e60f513.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09282f2a556fa76c518e3ccb1767af81b5d9881432e8f7daa9fd04b145e212c5 +size 6698 diff --git a/data/2025/2504_12xxx/2504.12369/images/57ab4223a27e3897885abbdfe8c890272849a9e1349679df129a8d7cc0014606.jpg b/data/2025/2504_12xxx/2504.12369/images/57ab4223a27e3897885abbdfe8c890272849a9e1349679df129a8d7cc0014606.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6c3147415bd38a5b188a466b8a173d0fa930fe1b --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/57ab4223a27e3897885abbdfe8c890272849a9e1349679df129a8d7cc0014606.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab25509ad271d317dff35776a5b4903af8d69690659790a05756d13843041f4c +size 84711 diff --git a/data/2025/2504_12xxx/2504.12369/images/5be7f40ca0170cf0931349adf148b2063a4d2a61d782d7c6200d3dc6a412a8d7.jpg b/data/2025/2504_12xxx/2504.12369/images/5be7f40ca0170cf0931349adf148b2063a4d2a61d782d7c6200d3dc6a412a8d7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e2b7f9ae18ab04b8bf273dae7cf8f0f3b0ca859a --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/5be7f40ca0170cf0931349adf148b2063a4d2a61d782d7c6200d3dc6a412a8d7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5aef0cf9e74a006f4c2b02479cd4b683cbc49aac31d6b709d049f4ed0ca8c942 +size 19301 diff --git a/data/2025/2504_12xxx/2504.12369/images/6570f481c382fe021ed13888148b604c0b603bb4d891f79cec1de0d0a488be05.jpg b/data/2025/2504_12xxx/2504.12369/images/6570f481c382fe021ed13888148b604c0b603bb4d891f79cec1de0d0a488be05.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a2ddf16d4e648aacf8858258b95b6a7dc35945d4 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/6570f481c382fe021ed13888148b604c0b603bb4d891f79cec1de0d0a488be05.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b37bbcf881b1a690fc4cc69c699aa4d2fbfdec3324a4f2066c9b14729909e77 +size 7636 diff --git a/data/2025/2504_12xxx/2504.12369/images/6d83deba8dc1fb557d72f7e206ad8763aaf1db95ce734260003174404ea4cd47.jpg b/data/2025/2504_12xxx/2504.12369/images/6d83deba8dc1fb557d72f7e206ad8763aaf1db95ce734260003174404ea4cd47.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc98bfbc6f00bc4a773ce140f4e3f66eba3b5287 --- /dev/null +++ 
b/data/2025/2504_12xxx/2504.12369/images/6d83deba8dc1fb557d72f7e206ad8763aaf1db95ce734260003174404ea4cd47.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35318cb395cea1ec0b7f5d8d9c5f77993cd58c9a87b3271472f565e10f4299c5 +size 18813 diff --git a/data/2025/2504_12xxx/2504.12369/images/710060b8d65f17b785353128df68a37c04d5ccfe3c20236be522f6805024dbe3.jpg b/data/2025/2504_12xxx/2504.12369/images/710060b8d65f17b785353128df68a37c04d5ccfe3c20236be522f6805024dbe3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a16c7ade8691d83cac4b70d0c91d3b87dc10b5d9 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/710060b8d65f17b785353128df68a37c04d5ccfe3c20236be522f6805024dbe3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cec7f0cc68b6a1ea3ed79b8606ec2375c3e600fd0dfcdb7e4b07ae7a9f056b7a +size 5505 diff --git a/data/2025/2504_12xxx/2504.12369/images/7260ec179d4330f3a596be59f60ebb624909fec6a3bdbace805bf1f660641908.jpg b/data/2025/2504_12xxx/2504.12369/images/7260ec179d4330f3a596be59f60ebb624909fec6a3bdbace805bf1f660641908.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bd39d3bf590a98bba65be35c77abd1aba4bab262 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/7260ec179d4330f3a596be59f60ebb624909fec6a3bdbace805bf1f660641908.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7ba67d86d7c9c2af55b110dd0cc63b71e7faa2b26bee6fda1abf8517aa41fb5 +size 6137 diff --git a/data/2025/2504_12xxx/2504.12369/images/767f4bcd7f8825e3ca7df0605b4a362e6098d0785328a13ad2ac10801d30be44.jpg b/data/2025/2504_12xxx/2504.12369/images/767f4bcd7f8825e3ca7df0605b4a362e6098d0785328a13ad2ac10801d30be44.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c8410807da51e55a7d8098b662e5481c4b090431 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/767f4bcd7f8825e3ca7df0605b4a362e6098d0785328a13ad2ac10801d30be44.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd7a7b36af64f8faff66ba4e75949109059ecc80b5490eaff3db19c6f2c92a8b +size 152034 diff --git a/data/2025/2504_12xxx/2504.12369/images/7e60e4ac83e5a8851f5a56840f7cc4b18e041198f40aa3ce69ed935d26ae78dc.jpg b/data/2025/2504_12xxx/2504.12369/images/7e60e4ac83e5a8851f5a56840f7cc4b18e041198f40aa3ce69ed935d26ae78dc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..506c11b2a632b1e973f039eb898e699309a7741a --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/7e60e4ac83e5a8851f5a56840f7cc4b18e041198f40aa3ce69ed935d26ae78dc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:daa5dd967c555c0f9798a9637b657cb7486070c1ed776c3beb9cdc7dc1bd68ed +size 3436 diff --git a/data/2025/2504_12xxx/2504.12369/images/80be7710b7aac22f2f910ef78e2582ba42b65a4d9eacce9bebbb6f7e2b7ed9dd.jpg b/data/2025/2504_12xxx/2504.12369/images/80be7710b7aac22f2f910ef78e2582ba42b65a4d9eacce9bebbb6f7e2b7ed9dd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4d9c6eebfeedc1a23b930f854c05af6e0caee321 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/80be7710b7aac22f2f910ef78e2582ba42b65a4d9eacce9bebbb6f7e2b7ed9dd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c36da15a87e3b0a5ce7ede66fbdabd1cca5c81ccca8b568bb3dd4a4ee0ac8aa +size 33013 diff --git a/data/2025/2504_12xxx/2504.12369/images/82b66f7e4a39cf80e04885bbb128c8ee9424241e8ae642b1dd992428acd71103.jpg 
b/data/2025/2504_12xxx/2504.12369/images/82b66f7e4a39cf80e04885bbb128c8ee9424241e8ae642b1dd992428acd71103.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a58c49a6a53e3f6f11c72fa8ad5ffc0b103b7b22 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/82b66f7e4a39cf80e04885bbb128c8ee9424241e8ae642b1dd992428acd71103.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64f846f47614c7dcf0c08f455255a01c8349d592d3b25b697eb7880e2475883c +size 6913 diff --git a/data/2025/2504_12xxx/2504.12369/images/8316f3b67686e102155ac6518c8ee82688e923ff0be476e4fe46781c9090d2df.jpg b/data/2025/2504_12xxx/2504.12369/images/8316f3b67686e102155ac6518c8ee82688e923ff0be476e4fe46781c9090d2df.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b79c40223e943ae7b67d6b73c85ab93d0288e811 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/8316f3b67686e102155ac6518c8ee82688e923ff0be476e4fe46781c9090d2df.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c72ef81a53de86528c9f2ce01feec458dcfeea52ec8836e5e99594357f4a429d +size 6759 diff --git a/data/2025/2504_12xxx/2504.12369/images/87369e340a68364c85e6e43c777c9d3474916f9a9513dff25ee3cb2472787016.jpg b/data/2025/2504_12xxx/2504.12369/images/87369e340a68364c85e6e43c777c9d3474916f9a9513dff25ee3cb2472787016.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eb388b79ed638a54113ce8c67b9ac7de537ad08b --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/87369e340a68364c85e6e43c777c9d3474916f9a9513dff25ee3cb2472787016.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:daaa04c012557f94a745174bdc1a2a35090b80cbc357686cb2e990b2ec86b140 +size 23280 diff --git a/data/2025/2504_12xxx/2504.12369/images/9208587645d921b10c68d516401d5b030a2fcaee02b18f07e376e379a667fee0.jpg b/data/2025/2504_12xxx/2504.12369/images/9208587645d921b10c68d516401d5b030a2fcaee02b18f07e376e379a667fee0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..67fc3444d11c351b8c70706ebde6693ffa6780cc --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/9208587645d921b10c68d516401d5b030a2fcaee02b18f07e376e379a667fee0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef8a272891c02ec4a72d5a3460fd3fca46f9e919d89f319f1d2918d4fcf94aa3 +size 8307 diff --git a/data/2025/2504_12xxx/2504.12369/images/9491f18c075d1aadae6c839aeb0789004bda41f81162625333e60688614b9348.jpg b/data/2025/2504_12xxx/2504.12369/images/9491f18c075d1aadae6c839aeb0789004bda41f81162625333e60688614b9348.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d200492770a709e767faaf6fa753e885c3edcb21 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/9491f18c075d1aadae6c839aeb0789004bda41f81162625333e60688614b9348.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:efd21122884a9d6aa043be1403f3e12e1833caa2f2f0a452364465d5857e1e2e +size 25039 diff --git a/data/2025/2504_12xxx/2504.12369/images/9759da0bae6bff88da79c18c7517e84bdbc403c95500c5810822ec675e10eb60.jpg b/data/2025/2504_12xxx/2504.12369/images/9759da0bae6bff88da79c18c7517e84bdbc403c95500c5810822ec675e10eb60.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ab5a2ab67b16d02200b96c27fa3834da2b82e5c0 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/9759da0bae6bff88da79c18c7517e84bdbc403c95500c5810822ec675e10eb60.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:58f92b66cdaa19ed6fb7a4ebf2863f6886d0c2f40d1841f66a9365d3bd6b6da4 +size 8213 diff --git a/data/2025/2504_12xxx/2504.12369/images/9e63e34502d19e1a9660587fe1b448ca4cbb22eb6399eea90032701c88909b36.jpg b/data/2025/2504_12xxx/2504.12369/images/9e63e34502d19e1a9660587fe1b448ca4cbb22eb6399eea90032701c88909b36.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a0856e7f04bc3bbc30e4d4b0960647663cdf059c --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/9e63e34502d19e1a9660587fe1b448ca4cbb22eb6399eea90032701c88909b36.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd2e4cf000120019903bebc56bc040875a1411eca88ffe73518c06ca031c0a1c +size 21866 diff --git a/data/2025/2504_12xxx/2504.12369/images/9e73712af021410a3cc6091b7f271192d0761ef5e0bbac919792a0ed2bc5e942.jpg b/data/2025/2504_12xxx/2504.12369/images/9e73712af021410a3cc6091b7f271192d0761ef5e0bbac919792a0ed2bc5e942.jpg new file mode 100644 index 0000000000000000000000000000000000000000..034afc144087546b4c1e20c080a4f423b9e86812 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/9e73712af021410a3cc6091b7f271192d0761ef5e0bbac919792a0ed2bc5e942.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6eceb7d2978d91d1f0e4ffa71d565238efca0ed2feaac19b6c91f2e6171ab268 +size 6374 diff --git a/data/2025/2504_12xxx/2504.12369/images/9e7db2f2124ac20efa2edca5ec9e00b0b6d99dcd36451b763e6533c66aad42f2.jpg b/data/2025/2504_12xxx/2504.12369/images/9e7db2f2124ac20efa2edca5ec9e00b0b6d99dcd36451b763e6533c66aad42f2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d47454d41d6cdb90b9ad647251d9fc1cbd94c6c1 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/9e7db2f2124ac20efa2edca5ec9e00b0b6d99dcd36451b763e6533c66aad42f2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d04ca37cd19320584eba178b37c075e6ccf5c1f453f3cf17925728cba4e3011 +size 6627 diff --git a/data/2025/2504_12xxx/2504.12369/images/a2b0f1636dc3ff7c8dfc235adca8ea0a713423b114cd97c50c1377020f680216.jpg b/data/2025/2504_12xxx/2504.12369/images/a2b0f1636dc3ff7c8dfc235adca8ea0a713423b114cd97c50c1377020f680216.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a069bd0d71c50f25a1d9b0974bda1156aaec2f86 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/a2b0f1636dc3ff7c8dfc235adca8ea0a713423b114cd97c50c1377020f680216.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d6b70fead42805feb337ff9d782124617a6339e88f65c47ba09ca435c46a5b4 +size 22050 diff --git a/data/2025/2504_12xxx/2504.12369/images/a94869f851f3a9f0a5887da9940203db5f53e43246aa8bb56a01ceb394a21328.jpg b/data/2025/2504_12xxx/2504.12369/images/a94869f851f3a9f0a5887da9940203db5f53e43246aa8bb56a01ceb394a21328.jpg new file mode 100644 index 0000000000000000000000000000000000000000..766d2bef1092bfe3fdef70a3e8dc06002f35756b --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/a94869f851f3a9f0a5887da9940203db5f53e43246aa8bb56a01ceb394a21328.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f87de821cc2eb430ecc8910af04960dee0b7d7038600ebf987c83ceebdeb77c1 +size 9172 diff --git a/data/2025/2504_12xxx/2504.12369/images/aae9a2f9f9ff8ee0fcb633f9a1bd4ffb65580a3167cf48d007a602089fab10cb.jpg b/data/2025/2504_12xxx/2504.12369/images/aae9a2f9f9ff8ee0fcb633f9a1bd4ffb65580a3167cf48d007a602089fab10cb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..756f3e86866ade9c8c5fa21f3f0f74074acfe637 --- /dev/null +++ 
b/data/2025/2504_12xxx/2504.12369/images/aae9a2f9f9ff8ee0fcb633f9a1bd4ffb65580a3167cf48d007a602089fab10cb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d499f2345885cf75e9dfdc58009afdef1bdbfee54d512a018be49b045ba979f3 +size 33814 diff --git a/data/2025/2504_12xxx/2504.12369/images/adad793cee2cc8f4ff35518d948a07a8d8ddce810d0efbfd57b5ddbd631ca21e.jpg b/data/2025/2504_12xxx/2504.12369/images/adad793cee2cc8f4ff35518d948a07a8d8ddce810d0efbfd57b5ddbd631ca21e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c6b5fdeef90c0c3f43d68fb569937cf8ed28c09d --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/adad793cee2cc8f4ff35518d948a07a8d8ddce810d0efbfd57b5ddbd631ca21e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:22126965d3d2ff908b890575d5323ad6008e3b57786de7bb5af35e04f39a419f +size 2938 diff --git a/data/2025/2504_12xxx/2504.12369/images/b2fb6711c620d4ecd7ae763026c5f9b183d24737649a7f6c09253b2753625d9a.jpg b/data/2025/2504_12xxx/2504.12369/images/b2fb6711c620d4ecd7ae763026c5f9b183d24737649a7f6c09253b2753625d9a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a1d717a1159357e285eb1458a1e57967e7693539 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/b2fb6711c620d4ecd7ae763026c5f9b183d24737649a7f6c09253b2753625d9a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e5aa579169fa94c25405cf8667a3b3d8601bd8f54f964b1ca7d0c5ab628ad0f0 +size 7874 diff --git a/data/2025/2504_12xxx/2504.12369/images/b4112e672e093bd908e0d8b47f478e0720181eb791bfc4170bf71a494e2cad04.jpg b/data/2025/2504_12xxx/2504.12369/images/b4112e672e093bd908e0d8b47f478e0720181eb791bfc4170bf71a494e2cad04.jpg new file mode 100644 index 0000000000000000000000000000000000000000..46474caa14f1b7ec31a4e35706622fd6111670b1 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/b4112e672e093bd908e0d8b47f478e0720181eb791bfc4170bf71a494e2cad04.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50ba17a9935246e31b82bdc7f1dc69b417e29c9578d3c04dfdea29874abb1821 +size 6328 diff --git a/data/2025/2504_12xxx/2504.12369/images/b7b3bfbd5ce5428451351fb16ec099c6e94af83843c985d33a1808711993472e.jpg b/data/2025/2504_12xxx/2504.12369/images/b7b3bfbd5ce5428451351fb16ec099c6e94af83843c985d33a1808711993472e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1e665202134893902d2c1757c59e33905f237cef --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/b7b3bfbd5ce5428451351fb16ec099c6e94af83843c985d33a1808711993472e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17ba0e1ee1c225c78f1c8f3a7fda9b269106e63cd09104643f605f9c697d88f2 +size 7844 diff --git a/data/2025/2504_12xxx/2504.12369/images/b9bcfbda92d8a3209eb2317a81d6f669a80933add113b0e203523ee7051e8417.jpg b/data/2025/2504_12xxx/2504.12369/images/b9bcfbda92d8a3209eb2317a81d6f669a80933add113b0e203523ee7051e8417.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d731c3460242f3615981983154aed3e7043dde1 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/b9bcfbda92d8a3209eb2317a81d6f669a80933add113b0e203523ee7051e8417.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:725052e5ee1bb28434d4d0a4f23f50baed44e5874f3d5892f230c3930b0fb065 +size 5021 diff --git a/data/2025/2504_12xxx/2504.12369/images/bdee5425e500778b85a96d164d5855b2c6ff216180af2cfc50a7e625931f6040.jpg 
b/data/2025/2504_12xxx/2504.12369/images/bdee5425e500778b85a96d164d5855b2c6ff216180af2cfc50a7e625931f6040.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c55d39b0f3c8f7b9334149e0e086bf6dd69d391 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/bdee5425e500778b85a96d164d5855b2c6ff216180af2cfc50a7e625931f6040.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2221540d8fdc576528e050a1b112173eb1041498b0773da7cac4e15bec20519b +size 36585 diff --git a/data/2025/2504_12xxx/2504.12369/images/c138eeacdb33bbe74ed7ef0bd75d5384412bddd857298e5c09d61013a7190e19.jpg b/data/2025/2504_12xxx/2504.12369/images/c138eeacdb33bbe74ed7ef0bd75d5384412bddd857298e5c09d61013a7190e19.jpg new file mode 100644 index 0000000000000000000000000000000000000000..65f77d8a9bb27cad771b493703c524a6960812e1 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/c138eeacdb33bbe74ed7ef0bd75d5384412bddd857298e5c09d61013a7190e19.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:29080753bc7192af76aeec7635e40a5999d755ee94a77273c99430169245c72b +size 3969 diff --git a/data/2025/2504_12xxx/2504.12369/images/c559b904ce54ed8b960ef64d960db2b2626c58caca3d6c512ff9ee87f3d438a7.jpg b/data/2025/2504_12xxx/2504.12369/images/c559b904ce54ed8b960ef64d960db2b2626c58caca3d6c512ff9ee87f3d438a7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..53a7bad92db9a907718bd94e38a348a46e4666ac --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/c559b904ce54ed8b960ef64d960db2b2626c58caca3d6c512ff9ee87f3d438a7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f443268899e98380cc203096627d8246154303facae1d049eee5765002cde5e +size 4059 diff --git a/data/2025/2504_12xxx/2504.12369/images/c891de3c6e4cfb733593f322c26a2b2245c57e9ed2ba6ef2a8161dce2e9e97c1.jpg b/data/2025/2504_12xxx/2504.12369/images/c891de3c6e4cfb733593f322c26a2b2245c57e9ed2ba6ef2a8161dce2e9e97c1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..28fb7857daf47b4578e515ffb6a3fff07574d12e --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/c891de3c6e4cfb733593f322c26a2b2245c57e9ed2ba6ef2a8161dce2e9e97c1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec69ab9f49d94bd590b5e8f15b1f538f58a8f6758b2a56a7cde2af9dcbec7f09 +size 18490 diff --git a/data/2025/2504_12xxx/2504.12369/images/ca8924287f821703821d8bf5d62aecb5fe7159ddc23de5a7e7b4d0f81a1a9737.jpg b/data/2025/2504_12xxx/2504.12369/images/ca8924287f821703821d8bf5d62aecb5fe7159ddc23de5a7e7b4d0f81a1a9737.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ef9779743237a68ef678a083077e3169f3c963bb --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/ca8924287f821703821d8bf5d62aecb5fe7159ddc23de5a7e7b4d0f81a1a9737.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4088b09181eb0f8066ed33fe77c8fd46007a15ec3afd7f1d96fbdaa0250138ef +size 4809 diff --git a/data/2025/2504_12xxx/2504.12369/images/d1e6c276910048364854297556611dd2a45a1eea429cab3c84376a57362243d5.jpg b/data/2025/2504_12xxx/2504.12369/images/d1e6c276910048364854297556611dd2a45a1eea429cab3c84376a57362243d5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c83889425b3c260ddd6acb4720c9d366959f2efd --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/d1e6c276910048364854297556611dd2a45a1eea429cab3c84376a57362243d5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:fbbef010198f18dc4dfc6e31eaae3b09a2830f0f240cc9e38cddc85df98214aa +size 31908 diff --git a/data/2025/2504_12xxx/2504.12369/images/d5a5ea0b7abdf3a603a31e858eb811914a167bca52860a3c526c94862c02db14.jpg b/data/2025/2504_12xxx/2504.12369/images/d5a5ea0b7abdf3a603a31e858eb811914a167bca52860a3c526c94862c02db14.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cde0837c605345e6f9968b5f1db4c7dda4081cf6 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/d5a5ea0b7abdf3a603a31e858eb811914a167bca52860a3c526c94862c02db14.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c61657f92baf3ae5052affca4d3c5bc18dc46ae30bd7962758ce2b8dfcf1bc4 +size 3753 diff --git a/data/2025/2504_12xxx/2504.12369/images/d7f7be95b011155ed7b2564d69783c24b25e5ffb775447a6f51ee1f3ba9ab8fb.jpg b/data/2025/2504_12xxx/2504.12369/images/d7f7be95b011155ed7b2564d69783c24b25e5ffb775447a6f51ee1f3ba9ab8fb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c85fe88c68583f52342b0dbd19b553db62a3a367 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/d7f7be95b011155ed7b2564d69783c24b25e5ffb775447a6f51ee1f3ba9ab8fb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53b7ddc3f21c86d2a12731f5c4ae6b519dcae3ff03151b32991d8ba1da913d58 +size 5259 diff --git a/data/2025/2504_12xxx/2504.12369/images/d885c7d407ce44ff8633d17891065c18fb770ac9a2dbdb8904c89b0abb280874.jpg b/data/2025/2504_12xxx/2504.12369/images/d885c7d407ce44ff8633d17891065c18fb770ac9a2dbdb8904c89b0abb280874.jpg new file mode 100644 index 0000000000000000000000000000000000000000..098816dec55c44687194359ddd01fae47a7c5e15 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/d885c7d407ce44ff8633d17891065c18fb770ac9a2dbdb8904c89b0abb280874.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e1c30724eee948868fc8fe357d839676f1f402641e1edcfe0c317c30ef16ee2 +size 7034 diff --git a/data/2025/2504_12xxx/2504.12369/images/ddc2586f3345e661f33248b6cc0087c4703090e0712fed894a0128507250e4b5.jpg b/data/2025/2504_12xxx/2504.12369/images/ddc2586f3345e661f33248b6cc0087c4703090e0712fed894a0128507250e4b5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0d80e3fa74914ac8ce6ab13b9f0f8aeb801fe6f1 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/ddc2586f3345e661f33248b6cc0087c4703090e0712fed894a0128507250e4b5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fdb8ff14e2b3065f3e2d6c27e5b6d879fcd0d1b4fd6e3269c0befeb27e639ace +size 30786 diff --git a/data/2025/2504_12xxx/2504.12369/images/e21770768b7f5933c8a6579cb05bb882d2db0cd3a528b963e76b28e85fc3f88e.jpg b/data/2025/2504_12xxx/2504.12369/images/e21770768b7f5933c8a6579cb05bb882d2db0cd3a528b963e76b28e85fc3f88e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3548b20c3438630fafee91e9408b78764209ac9c --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/e21770768b7f5933c8a6579cb05bb882d2db0cd3a528b963e76b28e85fc3f88e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:674b98f1592b078e4f6170d995f6a2aaee63fe6e6282b3abf1fa1eaf7eb419ed +size 8014 diff --git a/data/2025/2504_12xxx/2504.12369/images/e45872d86a46e6fd48f3db8a029253a71ef4ec766b935af977cd99b1b0592ac2.jpg b/data/2025/2504_12xxx/2504.12369/images/e45872d86a46e6fd48f3db8a029253a71ef4ec766b935af977cd99b1b0592ac2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..642047f11fc8a89fa7ac38b11c86f07cd542040a --- /dev/null +++ 
b/data/2025/2504_12xxx/2504.12369/images/e45872d86a46e6fd48f3db8a029253a71ef4ec766b935af977cd99b1b0592ac2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fab714c86078e9f2c1dd8eb35554181ddcc9f98bc49e3a934c41319ff399cc00 +size 8053 diff --git a/data/2025/2504_12xxx/2504.12369/images/e7a0d754531749230bbd4a9a05832bffb34d64393251380876fd18cad297f056.jpg b/data/2025/2504_12xxx/2504.12369/images/e7a0d754531749230bbd4a9a05832bffb34d64393251380876fd18cad297f056.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c6a0009798ff864b5e3d2b901aab3de0d4ba7f81 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/e7a0d754531749230bbd4a9a05832bffb34d64393251380876fd18cad297f056.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f937e6c79c5b36e7b0699e01ded8638f0323c29f439b6e8a6c62b0037d14f14 +size 6709 diff --git a/data/2025/2504_12xxx/2504.12369/images/ef99e53069789de1c72b40dec9daf83482c8b1d58b900b04b7b673a7536cdbeb.jpg b/data/2025/2504_12xxx/2504.12369/images/ef99e53069789de1c72b40dec9daf83482c8b1d58b900b04b7b673a7536cdbeb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4d4e8ede1db1e9ef8b67514ff95da108fe384b62 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/ef99e53069789de1c72b40dec9daf83482c8b1d58b900b04b7b673a7536cdbeb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2690950270ec4ad40ee09ee53813b628ba07c1d612e91d19375eb37db7bb08e7 +size 6554 diff --git a/data/2025/2504_12xxx/2504.12369/images/f0004364140102367ad4d60778876a7c4c25fdc4c20dc2f01eae89736c3023e5.jpg b/data/2025/2504_12xxx/2504.12369/images/f0004364140102367ad4d60778876a7c4c25fdc4c20dc2f01eae89736c3023e5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..11ee32b55de9e4c689ad26e64325b24f1578c543 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/f0004364140102367ad4d60778876a7c4c25fdc4c20dc2f01eae89736c3023e5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7416b7edc39d7c2e2ac90952d01107ebfb9c1b60c1798988db7c46e41d9f54a +size 19000 diff --git a/data/2025/2504_12xxx/2504.12369/images/f69aaeecb41bae0b361ddaf6325948bcdd310d6207adc846da6a8605dba8f003.jpg b/data/2025/2504_12xxx/2504.12369/images/f69aaeecb41bae0b361ddaf6325948bcdd310d6207adc846da6a8605dba8f003.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8319f09b1c4183ecdf8e7f8eedda61c2b6d2d173 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/f69aaeecb41bae0b361ddaf6325948bcdd310d6207adc846da6a8605dba8f003.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c67aff78cea8242a41d060545838c9fc915228c5697171fa9f87498ded1dc0a9 +size 44004 diff --git a/data/2025/2504_12xxx/2504.12369/images/fa911a9e797ad09c23c99f30039cff030ba8c0a685c01c115f0232972453d2aa.jpg b/data/2025/2504_12xxx/2504.12369/images/fa911a9e797ad09c23c99f30039cff030ba8c0a685c01c115f0232972453d2aa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..75da538e90dbdba7473e86f2fb7c7c21941a4424 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/fa911a9e797ad09c23c99f30039cff030ba8c0a685c01c115f0232972453d2aa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:519f15d09e252e4de3ec8535adf38429293ad4e24e2a0ce144666f8941284d40 +size 16226 diff --git a/data/2025/2504_12xxx/2504.12369/images/faac491d8ffa58fdb88b64c55cd0ac817d3a060c561d4c7658041b8daa232b65.jpg 
b/data/2025/2504_12xxx/2504.12369/images/faac491d8ffa58fdb88b64c55cd0ac817d3a060c561d4c7658041b8daa232b65.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9843473da15d8b672f5706c05f2faf0242c4cebc --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/faac491d8ffa58fdb88b64c55cd0ac817d3a060c561d4c7658041b8daa232b65.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c4c1df21498718bc93633b245ec8c915dd8e2412f2b5d1e1b2cbd6d4b53ddf2 +size 5494 diff --git a/data/2025/2504_12xxx/2504.12369/images/ff7c8d9812eb871f60dc03ba6cb00e8a26be809bd3b3c043a26142ca10e90a7e.jpg b/data/2025/2504_12xxx/2504.12369/images/ff7c8d9812eb871f60dc03ba6cb00e8a26be809bd3b3c043a26142ca10e90a7e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7fcc0acc243a88309c989f74b64541394062562e --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/images/ff7c8d9812eb871f60dc03ba6cb00e8a26be809bd3b3c043a26142ca10e90a7e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7991af17d04a4ee8371d638d7880bbec4af759856f41841bf47a5e875d78dcf2 +size 4222 diff --git a/data/2025/2504_12xxx/2504.12369/layout.json b/data/2025/2504_12xxx/2504.12369/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..6dd59c3eca2f0eb68a2e87ec57dffef606786117 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12369/layout.json @@ -0,0 +1,13319 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 167, + 97, + 444, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 97, + 444, + 138 + ], + "spans": [ + { + "bbox": [ + 167, + 97, + 444, + 138 + ], + "type": "text", + "content": "WORLDMEM: Long-term Consistent World Simulation with Memory" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 180, + 178, + 428, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 178, + 428, + 202 + ], + "spans": [ + { + "bbox": [ + 180, + 178, + 428, + 202 + ], + "type": "text", + "content": "Zeqi Xiao" + }, + { + "bbox": [ + 180, + 178, + 428, + 202 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 180, + 178, + 428, + 202 + ], + "type": "text", + "content": " Yushi Lan" + }, + { + "bbox": [ + 180, + 178, + 428, + 202 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 180, + 178, + 428, + 202 + ], + "type": "text", + "content": " Yifan Zhou" + }, + { + "bbox": [ + 180, + 178, + 428, + 202 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 180, + 178, + 428, + 202 + ], + "type": "text", + "content": " Wenqi Ouyang" + }, + { + "bbox": [ + 180, + 178, + 428, + 202 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 180, + 178, + 428, + 202 + ], + "type": "text", + "content": " Shuai Yang" + }, + { + "bbox": [ + 180, + 178, + 428, + 202 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 180, + 178, + 428, + 202 + ], + "type": "text", + "content": " Yanhong Zeng" + }, + { + "bbox": [ + 180, + 178, + 428, + 202 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 180, + 178, + 428, + 202 + ], + "type": "text", + "content": " Xingang Pan" + }, + { + "bbox": [ + 180, + 178, + 428, + 202 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 217, + 204, + 394, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 204, + 394, + 217 + ], + "spans": [ + { + "bbox": [ + 217, + 204, + 
394, + 217 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 217, + 204, + 394, + 217 + ], + "type": "text", + "content": "S-Lab, Nanyang Technological University," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 173, + 217, + 436, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 173, + 217, + 436, + 228 + ], + "spans": [ + { + "bbox": [ + 173, + 217, + 436, + 228 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 173, + 217, + 436, + 228 + ], + "type": "text", + "content": "Wangxuan Institute of Computer Technology, Peking University" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 253, + 228, + 357, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 253, + 228, + 357, + 239 + ], + "spans": [ + { + "bbox": [ + 253, + 228, + 357, + 239 + ], + "type": "text", + "content": "3Shanghai AI Laboratory" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 147, + 239, + 463, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 239, + 463, + 251 + ], + "spans": [ + { + "bbox": [ + 147, + 239, + 463, + 251 + ], + "type": "text", + "content": "{zeqi001, yushi001, yifan006, wenqi.ouyang, xingang.pan}@ntu.edu.sg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 201, + 251, + 410, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 251, + 410, + 261 + ], + "spans": [ + { + "bbox": [ + 201, + 251, + 410, + 261 + ], + "type": "text", + "content": "williamyang@pku.edu.cn, zengyh1900@gmail.com" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 281, + 289, + 329, + 301 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 289, + 329, + 301 + ], + "spans": [ + { + "bbox": [ + 281, + 289, + 329, + 301 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 140, + 314, + 470, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 314, + 470, + 468 + ], + "spans": [ + { + "bbox": [ + 140, + 314, + 470, + 468 + ], + "type": "text", + "content": "World simulation has gained increasing popularity due to its ability to model virtual environments and predict the consequences of actions. However, the limited temporal context window often leads to failures in maintaining long-term consistency, particularly in preserving 3D spatial consistency. In this work, we present WOrLD-MEM, a framework that enhances scene generation with a memory bank consisting of memory units that store memory frames and states (e.g., poses and timestamps). By employing state-aware memory attention that effectively extracts relevant information from these memory frames based on their states, our method is capable of accurately reconstructing previously observed scenes, even under significant viewpoint or temporal gaps. Furthermore, by incorporating timestamps into the states, our framework not only models a static world but also captures its dynamic evolution over time, enabling both perception and interaction within the simulated world. Extensive experiments in both virtual and real scenarios validate the effectiveness of our approach. Project page at https://xizaoqu.github.io/worldmem." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 492, + 192, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 492, + 192, + 504 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 192, + 504 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 517, + 506, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 517, + 506, + 605 + ], + "spans": [ + { + "bbox": [ + 104, + 517, + 506, + 605 + ], + "type": "text", + "content": "World simulation has gained significant attention for its ability to model environments and predict the outcomes of actions (Bar et al., 2024; Decart et al., 2024; Alonso et al., 2025; Feng et al., 2024; Parker-Holder et al., 2024; Valevski et al., 2024). Recent advances in video diffusion models have further propelled this field, enabling high-fidelity rollouts of potential future scenarios based on user actions, such as navigating through an environment or interacting with objects. These capabilities make world simulators particularly promising for applications in autonomous navigation (Feng et al., 2024; Bar et al., 2024) and as viable alternatives to traditional game engines (Decart et al., 2024; Parker-Holder et al., 2024)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 609, + 504, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 609, + 504, + 687 + ], + "spans": [ + { + "bbox": [ + 104, + 609, + 504, + 687 + ], + "type": "text", + "content": "Despite these advances, a fundamental challenge remains: the limited probing horizon. Due to computational and memory constraints, video generative models operate within a fixed context window and are unable to condition on the full sequence of past generations. Consequently, most existing methods simply discard previously generated content, leading to a critical issue of world inconsistency, which is also revealed in Wang et al. (2025). As illustrated in Figure 1(a), when the camera moves away and returns, the regenerated content diverges from the earlier scene, violating the coherence expected in a consistent world." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 692, + 504, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 692, + 504, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 692, + 504, + 715 + ], + "type": "text", + "content": "A natural solution is to maintain an external memory that stores and retrieves relevant historical information outside the generative loop. While intuitive, formulating such a memory mechanism is" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 223, + 35, + 567 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 223, + 35, + 567 + ], + "spans": [ + { + "bbox": [ + 14, + 223, + 35, + 567 + ], + "type": "text", + "content": "arXiv:2504.12369v2 [cs.CV] 2 Dec 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 731, + 385, + 742 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 731, + 385, + 742 + ], + "spans": [ + { + "bbox": [ + 105, + 731, + 385, + 742 + ], + "type": "text", + "content": "39th Conference on Neural Information Processing Systems (NeurIPS 2025)." 
+ } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 71, + 506, + 281 + ], + "blocks": [ + { + "bbox": [ + 107, + 71, + 506, + 281 + ], + "lines": [ + { + "bbox": [ + 107, + 71, + 506, + 281 + ], + "spans": [ + { + "bbox": [ + 107, + 71, + 506, + 281 + ], + "type": "image", + "image_path": "2cedaf771a3bc9c255e1950c8a7a8826919dba3fb6d4f8b211d37dc47c3d69f4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 289, + 506, + 376 + ], + "lines": [ + { + "bbox": [ + 104, + 289, + 506, + 376 + ], + "spans": [ + { + "bbox": [ + 104, + 289, + 506, + 376 + ], + "type": "text", + "content": "Figure 1: WORLDMEM enables long-term consistent world generation with an integrated memory mechanism. (a) Previous world generation methods typically face the problem of inconsistent world due to limited temporal context window size. (b) WORLDMEM empowers the agent to explore diverse and consistent worlds with an expansive action space, e.g., crafting environments by placing objects like pumpkin light or freely roaming around. Most importantly, after exploring for a while and glancing back, we find the objects we placed are still there, with the inspiring sight of the light melting the surrounding snow, testifying to the passage of time. Red and green boxes indicate scenes that should be consistent." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 389, + 504, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 389, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 104, + 389, + 504, + 422 + ], + "type": "text", + "content": "non-trivial. A direct approach might involve explicit 3D scene reconstruction to preserve geometry and detail. However, 3D representations are inflexible in dynamic and evolving environments and are prone to loss of detail, especially for large, unbounded scenes (Wu et al., 2025a)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 427, + 506, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 427, + 506, + 537 + ], + "spans": [ + { + "bbox": [ + 104, + 427, + 506, + 537 + ], + "type": "text", + "content": "Instead, we argue that geometry-free representations offer a more flexible solution. These representations, however, pose their own challenges – particularly in balancing detail retention with memory scalability. For example, implicit approaches like storing abstract features via LoRA modules (Hong et al., 2024) offer compactness but lose visual fidelity and spatial specificity. Some recent works represent visual scenes as discrete tokens encoding fine-grained visual information (Sajjadi et al., 2022; Jiang et al., 2025), but they are limited by a fixed token and struggle to capture the complexity of diverse and evolving environments. To address this issue, we observe that for generating the immediate future, only a small subset of historical content is typically relevant. Based on this, we propose a token-level memory bank that stores all previously generated latent tokens, and retrieves a targeted subset for each generation step based on relevance." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 541, + 506, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 541, + 506, + 652 + ], + "spans": [ + { + "bbox": [ + 104, + 541, + 506, + 652 + ], + "type": "text", + "content": "Conditioning on the retrieved memory requires spatial-temporal reasoning. In contrast to prior work where memory aids local temporal smoothness (Zheng et al., 2024a) or semantic coherence (Wu et al., 2025b; Rahman et al., 2023), long-term world simulation demands reasoning over large spatiotemporal gaps, e.g., memory and query may differ in viewpoint and time, and retain exact scenes with detail. To facilitate this reasoning, we propose augmenting each memory unit with explicit state cues, including spatial location, viewpoint, and timestamp. These cues serve as anchors for reasoning and are embedded as part of the query-key attention mechanism. Through this state-aware attention, our model can effectively reason the current frame with past observations, facilitating accurate and coherent generation. Importantly, such a design leverages standard attention architectures, enabling it to scale naturally with modern hardware and model capacity." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 656, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 656, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 656, + 506, + 723 + ], + "type": "text", + "content": "Motivated by this idea, we build our approach, WOrLDMEM, on top of the Conditional Diffusion Transformer (CDiT) (Peebles and Xie, 2023) and the Diffusion Forcing (DF) paradigm (Chen et al., 2025), which autoregressively generates first-person viewpoints conditioned on external action signals. As discussed above, at the core of WOrLDMEM is a memory mechanism composed of a memory bank and memory attention. To ensure efficient and relevant memory retrieval from the bank, we introduce a confidence-based selection strategy that scores memory units based on field-of-view" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": "(FOV) overlap and temporal proximity. In the memory attention, the latent tokens being generated act as queries, attending to the memory tokens (as keys and values) to incorporate relevant historical context. To ensure robust correspondence across varying viewpoints and time gaps, we enrich both queries and keys with state-aware embeddings. A relative embedding design is introduced to ease the learning of spatial and temporal relationships. This pipeline enables precise, scalable reasoning over long-range memory, ensuring consistency in dynamic and evolving world simulations." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 144, + 506, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 144, + 506, + 242 + ], + "spans": [ + { + "bbox": [ + 104, + 144, + 506, + 242 + ], + "type": "text", + "content": "We evaluate WOrLDMEM on a customized Minecraft benchmark (Fan et al., 2022) and on RealEstate10K (Zhou et al., 2018). The Minecraft benchmark includes diverse terrains (e.g., plains, savannas, and deserts) and various action modalities (movement, viewpoint control, and event triggers), which is a wonderful environment for idea verification. Extensive experiments show that WOrLDMEM significantly improves 3D spatial consistency, enabling robust viewpoint reasoning and high-fidelity scene generation, as shown in Figure 1(b). Furthermore, in dynamic environments, WOrLDMEM accurately tracks and follows evolving events and environment changes, demonstrating its ability to both perceive and interact with the generated world. We hope our promising results and scalable designs will inspire future research on memory-based world simulation." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 269, + 198, + 282 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 269, + 198, + 282 + ], + "spans": [ + { + "bbox": [ + 105, + 269, + 198, + 282 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 300, + 506, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 300, + 506, + 422 + ], + "spans": [ + { + "bbox": [ + 104, + 300, + 506, + 422 + ], + "type": "text", + "content": "Video diffusion model. With the rapid advancement of diffusion models (Song et al., 2020; Peebles and Xie, 2023; Chen et al., 2025), video generation has made significant strides (Wang et al., 2023a,b; Chen et al., 2023; Guo et al., 2023; OpenAI, 2024; Jin et al., 2024; Yin et al., 2024). The field has evolved from traditional U-Net-based architectures (Wang et al., 2023a; Chen et al., 2023; Guo et al., 2023) to Transformer-based frameworks (OpenAI, 2024; Ma et al., 2024; Zheng et al., 2024b), enabling video diffusion models to generate highly realistic and temporally coherent videos. Recently, autoregressive video generation (Chen et al., 2025; Kim et al., 2024; Henschel et al., 2024) has emerged as a promising approach to extend video length, theoretically indefinitely. Notably, Diffusion Forcing (Chen et al., 2025) introduces a per-frame noise-level denoising paradigm. Unlike the full-sequence paradigm, which applies a uniform noise level across all frames, per-frame noise-level denoising offers a more flexible approach, enabling autoregressive generation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 427, + 506, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 427, + 506, + 525 + ], + "spans": [ + { + "bbox": [ + 104, + 427, + 506, + 525 + ], + "type": "text", + "content": "Interactive world simulation. World simulation aims to model an environment by predicting the next state given the current state and action. This concept has been extensively explored in the construction of world models (Ha and Schmidhuber, 2018b) for agent learning (Ha and Schmidhuber, 2018a; Hafner et al., 2019, 2020; Hu et al., 2023; Beattie et al., 2016; Yang et al., 2023). 
With advances in video generation, high-quality world simulation with robust control has become feasible, leading to numerous works focusing on interactive world simulation (Bar et al., 2024; Decart et al., 2024; Alonso et al., 2025; Feng et al., 2024; Parker-Holder et al., 2024; Valevski et al., 2024; Yu et al., 2025c,a,b). These approaches enable agents to navigate generated environments and interact with them based on external commands." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 531, + 506, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 531, + 506, + 553 + ], + "spans": [ + { + "bbox": [ + 104, + 531, + 506, + 553 + ], + "type": "text", + "content": "However, due to context window limitations, such methods discard previously generated content, leading to inconsistencies in the simulated world, particularly in maintaining 3D spatial coherence." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 558, + 506, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 558, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 104, + 558, + 506, + 722 + ], + "type": "text", + "content": "Consistent world simulation. Ensuring the consistency of a generated world is crucial for effective world simulation Wang et al. (2025). Existing approaches can be broadly categorized into two types: geometric-based and geometric-free. The geometric-based methods explicitly reconstruct the generated world into a 3D/4D representation (Liu et al., 2024; Gao et al., 2024; Wang and Agapito, 2024; Ren et al., 2025; Yu et al., 2024b,a; Liang et al., 2024). While this strategy can reliably maintain consistency, it imposes strict constraints on flexibility: Once the world is reconstructed, modifying or interacting with it becomes challenging. Geometric-free methods focus on implicit learning. Methods like Alonso et al. (2025); Valevski et al. (2024) ensure consistency by overfitting to predefined scenarios (e.g., specific CS:GO or DOOM maps), limiting scalability. StreamingT2V (Henschel et al., 2024) maintains long-term consistency by continuing on both global and local visual contexts from previous frames, while SlowFastGen (Hong et al., 2024) progressively trains LoRA (Hu et al., 2022) modules for memory recall. However, these methods rely on abstract representations, making accurate scene reconstruction challenging. In contrast, our approach retrieves information from previously generated frames and their states, ensuring world consistency without overfitting to specific scenarios." 
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 115, + 77, + 379, + 154 + ], + "blocks": [ + { + "bbox": [ + 115, + 77, + 379, + 154 + ], + "lines": [ + { + "bbox": [ + 115, + 77, + 379, + 154 + ], + "spans": [ + { + "bbox": [ + 115, + 77, + 379, + 154 + ], + "type": "image", + "image_path": "17f283519eda9a5331b73da78c30e9f49bf3b0344d40c5194698866ef6a8043e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 408, + 87, + 471, + 134 + ], + "blocks": [ + { + "bbox": [ + 408, + 87, + 471, + 134 + ], + "lines": [ + { + "bbox": [ + 408, + 87, + 471, + 134 + ], + "spans": [ + { + "bbox": [ + 408, + 87, + 471, + 134 + ], + "type": "image", + "image_path": "710060b8d65f17b785353128df68a37c04d5ccfe3c20236be522f6805024dbe3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 116, + 163, + 226, + 252 + ], + "blocks": [ + { + "bbox": [ + 116, + 163, + 226, + 252 + ], + "lines": [ + { + "bbox": [ + 116, + 163, + 226, + 252 + ], + "spans": [ + { + "bbox": [ + 116, + 163, + 226, + 252 + ], + "type": "image", + "image_path": "a94869f851f3a9f0a5887da9940203db5f53e43246aa8bb56a01ceb394a21328.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 144, + 261, + 202, + 270 + ], + "lines": [ + { + "bbox": [ + 144, + 261, + 202, + 270 + ], + "spans": [ + { + "bbox": [ + 144, + 261, + 202, + 270 + ], + "type": "text", + "content": "(c) State Embedding" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 258, + 161, + 493, + 258 + ], + "blocks": [ + { + "bbox": [ + 419, + 143, + 476, + 152 + ], + "lines": [ + { + "bbox": [ + 419, + 143, + 476, + 152 + ], + "spans": [ + { + "bbox": [ + 419, + 143, + 476, + 152 + ], + "type": "text", + "content": "(b) Input Difference" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 258, + 161, + 493, + 258 + ], + "lines": [ + { + "bbox": [ + 258, + 161, + 493, + 258 + ], + "spans": [ + { + "bbox": [ + 258, + 161, + 493, + 258 + ], + "type": "image", + "image_path": "0eaba4c9b0918d5cb17309e5aac57ca03240e9a5a335a15ff79f2279e7e8be2c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 351, + 261, + 405, + 270 + ], + "lines": [ + { + "bbox": [ + 351, + 261, + 405, + 270 + ], + "spans": [ + { + "bbox": [ + 351, + 261, + 405, + 270 + ], + "type": "text", + "content": "(d) Memory Block" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 278, + 504, + 334 + ], + "lines": [ + { + "bbox": [ + 104, + 278, + 504, + 334 + ], + "spans": [ + { + "bbox": [ + 104, + 278, + 504, + 334 + ], + "type": "text", + "content": "Figure 2: Comprehensive overview of WOrLDMEM. The framework comprises a conditional diffusion transformer integrated with memory blocks, with a dedicated memory bank storing memory units from previously generated content. 
By retrieving these memory units from the memory bank and incorporating their information via memory blocks to guide generation, our approach ensures long-term consistency in world simulation." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 343, + 196, + 355 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 343, + 196, + 355 + ], + "spans": [ + { + "bbox": [ + 105, + 343, + 196, + 355 + ], + "type": "text", + "content": "3 WORLDMEM" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 367, + 504, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 367, + 504, + 401 + ], + "spans": [ + { + "bbox": [ + 104, + 367, + 504, + 401 + ], + "type": "text", + "content": "This section details the methodology of WORLDMEM. Sec. 3.1 introduces the relevant preliminaries, while Sec. 3.2 describes the interactive world simulator serving as our baseline. Sec. 3.3 and 3.4 present the core of our proposed memory mechanism." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 413, + 181, + 425 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 413, + 181, + 425 + ], + "spans": [ + { + "bbox": [ + 105, + 413, + 181, + 425 + ], + "type": "text", + "content": "3.1 Preliminary" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 433, + 504, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 433, + 504, + 456 + ], + "spans": [ + { + "bbox": [ + 104, + 433, + 504, + 456 + ], + "type": "text", + "content": "Video diffusion models. Video diffusion models generate video sequences by iteratively denoising Gaussian noise through a learned reverse process:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 217, + 458, + 504, + 473 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 217, + 458, + 504, + 473 + ], + "spans": [ + { + "bbox": [ + 217, + 458, + 504, + 473 + ], + "type": "interline_equation", + "content": "p _ {\\theta} \\left(\\mathbf {x} _ {t} ^ {k - 1} \\mid \\mathbf {x} _ {t} ^ {k}\\right) = \\mathcal {N} \\left(\\mathbf {x} _ {t} ^ {k - 1}; \\mu_ {\\theta} \\left(\\mathbf {x} _ {t} ^ {k}, k\\right), \\sigma_ {k} ^ {2} \\mathbf {I}\\right), \\tag {1}", + "image_path": "128fabee19e5abaad9587da0de6cd970dc5cc8944b3b1196aad6e7166dc04fe7.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "text", + "content": "where all frames " + }, + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "inline_equation", + "content": "(\\mathbf{x}_t^k)_{1\\leq t\\leq T}" + }, + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "text", + "content": " share the same noise level " + }, + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "text", + "content": " is the context window length. This full-sequence approach enables global guidance but lacks flexibility in sequence length and autoregressive generation."
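To make Eq. (1) concrete, here is a minimal sketch of one shared-noise-level reverse step for a clip of video latents. It assumes a DDPM-style beta schedule and a hypothetical noise-prediction network `eps_theta`; neither is specified at this point in the paper, so every name below is illustrative.

```python
import torch

@torch.no_grad()
def reverse_step(x_k, k, eps_theta, betas):
    # One reverse step of Eq. (1): all T frames of the clip share noise level k.
    # x_k: (T, C, H, W) noisy video latents; eps_theta(x, k) -> predicted noise.
    alphas = 1.0 - betas
    alpha_bar = torch.cumprod(alphas, dim=0)
    coef = betas[k] / torch.sqrt(1.0 - alpha_bar[k])
    mean = (x_k - coef * eps_theta(x_k, k)) / torch.sqrt(alphas[k])  # mu_theta
    if k == 0:
        return mean                                   # final step adds no noise
    return mean + torch.sqrt(betas[k]) * torch.randn_like(x_k)  # sigma_k^2 = beta_k

# Toy usage with an untrained stand-in for the network.
betas = torch.linspace(1e-4, 0.02, 1000)
x = torch.randn(8, 4, 18, 32)                         # T=8 frames of VAE latents
x = reverse_step(x, 999, lambda z, k: torch.zeros_like(z), betas)
```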
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 515, + 504, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 515, + 504, + 559 + ], + "spans": [ + { + "bbox": [ + 104, + 515, + 504, + 559 + ], + "type": "text", + "content": "Autoregressive video generation. Autoregressive video generation aims to extend videos over the long term by predicting frames sequentially (Kondratyuk et al., 2024; Wu et al., 2023). While various methods exist for autoregressive generation, Diffusion Forcing (DF) (Chen et al., 2025) provides a neat and effective approach to achieve this. Specifically, DF introduces per-frame noise levels " + }, + { + "bbox": [ + 104, + 515, + 504, + 559 + ], + "type": "inline_equation", + "content": "k_{t}" + }, + { + "bbox": [ + 104, + 515, + 504, + 559 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 208, + 562, + 504, + 578 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 208, + 562, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 208, + 562, + 504, + 578 + ], + "type": "interline_equation", + "content": "p _ {\\theta} \\left(\\mathbf {x} _ {t} ^ {k _ {t} - 1} \\mid \\mathbf {x} _ {t} ^ {k _ {t}}\\right) = \\mathcal {N} \\left(\\mathbf {x} _ {t} ^ {k _ {t} - 1}; \\mu_ {\\theta} \\left(\\mathbf {x} _ {t} ^ {k _ {t}}, k _ {t}\\right), \\sigma_ {k _ {t}} ^ {2} \\mathbf {I}\\right), \\tag {2}", + "image_path": "8316f3b67686e102155ac6518c8ee82688e923ff0be476e4fe46781c9090d2df.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 585, + 504, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 585, + 504, + 618 + ], + "spans": [ + { + "bbox": [ + 104, + 585, + 504, + 618 + ], + "type": "text", + "content": "Unlike full-sequence diffusion, DF generates video flexibly and stably beyond the training horizon. Autoregressive generation is a special case when only the last one or a few frames are noisy. With autoregressive video generation, long-term interactive world simulation becomes feasible." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 630, + 255, + 641 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 630, + 255, + 641 + ], + "spans": [ + { + "bbox": [ + 105, + 630, + 255, + 641 + ], + "type": "text", + "content": "3.2 Interactive World Simulation" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 651, + 504, + 695 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 651, + 504, + 695 + ], + "spans": [ + { + "bbox": [ + 104, + 651, + 504, + 695 + ], + "type": "text", + "content": "Before introducing the memory mechanism, we first present our interactive world simulator, which models long video sequences using an auto-regressive conditional diffusion transformer. Interaction is achieved by embedding external control signals, primarily actions, into the model through dedicated conditioning modules (Parker-Holder et al., 2024; Decart et al., 2024; Yu et al., 2025c)." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "text", + "content": "Following prior work (Decart et al., 2024), we adopt a conditional Diffusion Transformer (DiT) (Peebles and Xie, 2023) architecture for video generation, and Diffusion Forecasting (DF) (Chen et al.," + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": "2025) for autoregressive prediction. As shown in Figure 2(a), our model consists of multiple DiT blocks with spatial and temporal modules for spatiotemporal reasoning. The temporal module applies causal attention to ensure that each frame only attends to preceding frames." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 110, + 506, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 199 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 199 + ], + "type": "text", + "content": "The actions are injected by first projected into the embedding space using a multi-layer perceptron (MLP). The resulting action embeddings are added to the denoising timestep embeddings and injected into the temporal blocks using Adaptive Layer Normalization (AdaLN) (Xu et al., 2019), following the paradigm of Bar et al. (2024); Decart et al. (2024). In our Minecraft experiments, the action space contains 25 dimensions, including movements, view adjustments, and event triggers. We also apply timestep embeddings to the spatial blocks in the same manner, although this is omitted from the figure for clarity. Standard architectural components such as residual connections, multi-head attention, and feedforward networks are also not shown." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 203, + 504, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 203, + 504, + 248 + ], + "spans": [ + { + "bbox": [ + 104, + 203, + 504, + 248 + ], + "type": "text", + "content": "The combination of conditional DiT and DF provides a strong baseline for long-term interactive video generation. However, due to the computational cost of video synthesis, the temporal context window remains limited. As a result, content outside this window is forgotten, which leads to inconsistencies during long-term generation (Decart et al., 2024)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 262, + 294, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 262, + 294, + 274 + ], + "spans": [ + { + "bbox": [ + 105, + 262, + 294, + 274 + ], + "type": "text", + "content": "3.3 Memory Representation and Retrieval" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 282, + 278, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 282, + 278, + 479 + ], + "spans": [ + { + "bbox": [ + 104, + 282, + 278, + 479 + ], + "type": "text", + "content": "To address the limited context window of video generative models, we introduce a memory mechanism that enables the model to retain and retrieve information beyond the current generation window. This mechanism maintains a memory bank composed of historical frames and their associated state information: " + }, + { + "bbox": [ + 104, + 282, + 278, + 479 + ], + "type": "inline_equation", + "content": "\\{(\\mathbf{x}_i^m,\\mathbf{p}_i,t_i)\\}_{i = 1}^N" + }, + { + "bbox": [ + 104, + 282, + 278, + 479 + ], + "type": "text", + "content": " where " + }, + { + "bbox": [ + 104, + 282, + 278, + 479 + ], + "type": "inline_equation", + "content": "\\mathbf{x}_i^m" + }, + { + "bbox": [ + 104, + 282, + 278, + 479 + ], + "type": "text", + "content": " denotes a memory frame, " + }, + { + "bbox": [ + 104, + 282, + 278, + 479 + ], + "type": "inline_equation", + "content": "\\mathbf{p}_i\\in \\mathbb{R}^5" + }, + { + "bbox": [ + 104, + 282, + 278, + 479 + ], + "type": "text", + "content": " (x,y,z, pitch, yaw) is its pose, and " + }, + { + "bbox": [ + 104, + 282, + 278, + 479 + ], + "type": "inline_equation", + "content": "t_i" + }, + { + "bbox": [ + 104, + 282, + 278, + 479 + ], + "type": "text", + "content": " is the timestamp. Each tuple is referred to as a memory unit. We save " + }, + { + "bbox": [ + 104, + 282, + 278, + 479 + ], + "type": "inline_equation", + "content": "\\mathbf{m}_i" + }, + { + "bbox": [ + 104, + 282, + 278, + 479 + ], + "type": "text", + "content": " in token-level, which is compressed by the visual encoder but retains enough details for reconstruction. The corresponding states " + }, + { + "bbox": [ + 104, + 282, + 278, + 479 + ], + "type": "inline_equation", + "content": "\\{(\\mathbf{p},t)\\}" + }, + { + "bbox": [ + 104, + 282, + 278, + 479 + ], + "type": "text", + "content": " play a critical role not only in memory retrieval but also in enabling state-aware memory conditioning." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 283, + 285, + 460, + 298 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 285, + 460, + 298 + ], + "spans": [ + { + "bbox": [ + 283, + 285, + 460, + 298 + ], + "type": "text", + "content": "Algorithm 1: Memory Retrieval Algorithm" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 283, + 300, + 446, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 300, + 446, + 321 + ], + "spans": [ + { + "bbox": [ + 283, + 300, + 446, + 321 + ], + "type": "text", + "content": "Input: Memory bank of " + }, + { + "bbox": [ + 283, + 300, + 446, + 321 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 283, + 300, + 446, + 321 + ], + "type": "text", + "content": " historical states " + }, + { + "bbox": [ + 283, + 300, + 446, + 321 + ], + "type": "inline_equation", + "content": "\\{(\\mathbf{x}_i^m,\\mathbf{p}_i,t_i)\\}_{i = 1}^N;" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 283, + 321, + 491, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 321, + 491, + 331 + ], + "spans": [ + { + "bbox": [ + 283, + 321, + 491, + 331 + ], + "type": "text", + "content": "Current state " + }, + { + "bbox": [ + 283, + 321, + 491, + 331 + ], + "type": "inline_equation", + "content": "(\\mathbf{x}_c,\\mathbf{p}_c,t_c)" + }, + { + "bbox": [ + 283, + 321, + 491, + 331 + ], + "type": "text", + "content": " ; memory condition length " + }, + { + "bbox": [ + 283, + 321, + 491, + 331 + ], + "type": "inline_equation", + "content": "L_{M}" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 283, + 331, + 430, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 331, + 430, + 340 + ], + "spans": [ + { + "bbox": [ + 283, + 331, + 430, + 340 + ], + "type": "text", + "content": "Similarity threshold " + }, + { + "bbox": [ + 283, + 331, + 430, + 340 + ], + "type": "inline_equation", + "content": "tr" + }, + { + "bbox": [ + 283, + 331, + 430, + 340 + ], + "type": "text", + "content": "; weights " + }, + { + "bbox": [ + 283, + 331, + 430, + 340 + ], + "type": "inline_equation", + "content": "w_{o}" + }, + { + "bbox": [ + 283, + 331, + 430, + 340 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 283, + 331, + 430, + 340 + ], + "type": "inline_equation", + "content": "w_{t}" + }, + { + "bbox": [ + 283, + 331, + 430, + 340 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 283, + 340, + 435, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 340, + 435, + 350 + ], + "spans": [ + { + "bbox": [ + 283, + 340, + 435, + 350 + ], + "type": "text", + "content": "Output: A list of selected state indices " + }, + { + "bbox": [ + 283, + 340, + 435, + 350 + ], + "type": "inline_equation", + "content": "S" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 283, + 350, + 393, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 350, + 393, + 360 + ], + "spans": [ + { + "bbox": [ + 283, + 350, + 393, + 360 + ], + "type": "text", + "content": "Compute Confidence Score:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 283, + 360, + 492, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 360, + 492, + 370 + ], + "spans": [ + { + "bbox": [ + 283, + 360, + 492, + 370 + ], + "type": "text", + "content": "Compute FOV overlap ratio o via Monte Carlo sampling." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 283, + 370, + 482, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 370, + 482, + 380 + ], + "spans": [ + { + "bbox": [ + 283, + 370, + 482, + 380 + ], + "type": "text", + "content": "Compute time difference " + }, + { + "bbox": [ + 283, + 370, + 482, + 380 + ], + "type": "inline_equation", + "content": "\\mathbf{d} = \\mathrm{Concat}\\big(\\{|t_i - t_c|\\}_{i = 1}^n\\big)" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 283, + 380, + 440, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 380, + 440, + 390 + ], + "spans": [ + { + "bbox": [ + 283, + 380, + 440, + 390 + ], + "type": "text", + "content": "Compute confidence " + }, + { + "bbox": [ + 283, + 380, + 440, + 390 + ], + "type": "inline_equation", + "content": "\\alpha = \\mathbf{o}\\cdot w_{o} - \\mathbf{d}\\cdot w_{t}" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 283, + 394, + 419, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 394, + 419, + 403 + ], + "spans": [ + { + "bbox": [ + 283, + 394, + 419, + 403 + ], + "type": "text", + "content": "Selection with Similarity Filtering:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 283, + 404, + 346, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 404, + 346, + 413 + ], + "spans": [ + { + "bbox": [ + 283, + 404, + 346, + 413 + ], + "type": "text", + "content": "Initialize " + }, + { + "bbox": [ + 283, + 404, + 346, + 413 + ], + "type": "inline_equation", + "content": "S = \\varnothing" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 283, + 414, + 363, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 414, + 363, + 424 + ], + "spans": [ + { + "bbox": [ + 283, + 414, + 363, + 424 + ], + "type": "text", + "content": "for " + }, + { + "bbox": [ + 283, + 414, + 363, + 424 + ], + "type": "inline_equation", + "content": "m = 1" + }, + { + "bbox": [ + 283, + 414, + 363, + 424 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 283, + 414, + 363, + 424 + ], + "type": "inline_equation", + "content": "L_{M}" + }, + { + "bbox": [ + 283, + 414, + 363, + 424 + ], + "type": "text", + "content": " do" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 294, + 424, + 390, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 424, + 390, + 434 + ], + "spans": [ + { + "bbox": [ + 294, + 424, + 390, + 434 + ], + "type": "text", + "content": "Select " + }, + { + "bbox": [ + 294, + 424, + 390, + 434 + ], + "type": "inline_equation", + "content": "i^{*}" + }, + { + "bbox": [ + 294, + 424, + 390, + 434 + ], + "type": "text", + "content": " with highest " + }, + { + "bbox": [ + 294, + 424, + 390, + 434 + ], + "type": "inline_equation", + "content": "\\alpha_{i^{*}}" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 294, + 434, + 353, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 294, + 434, + 353, + 443 + ], + "spans": [ + { + "bbox": [ + 294, + 434, + 353, + 443 + ], + "type": "text", + "content": "Append " + }, + { + "bbox": [ + 294, + 434, + 353, + 443 + ], + "type": "inline_equation", + "content": "i^{*}" + }, + { + "bbox": [ + 294, + 434, + 353, + 443 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 294, + 434, + 353, + 443 + ], + "type": "inline_equation", + "content": "S" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 294, + 443, + 448, + 454 + ], + "type": "text", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 294, + 443, + 448, + 454 + ], + "spans": [ + { + "bbox": [ + 294, + 443, + 448, + 454 + ], + "type": "text", + "content": "Remove all " + }, + { + "bbox": [ + 294, + 443, + 448, + 454 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 294, + 443, + 448, + 454 + ], + "type": "text", + "content": " where similarity " + }, + { + "bbox": [ + 294, + 443, + 448, + 454 + ], + "type": "inline_equation", + "content": "(i^{*},j) > tr" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 285, + 456, + 319, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 285, + 456, + 319, + 464 + ], + "spans": [ + { + "bbox": [ + 285, + 456, + 319, + 464 + ], + "type": "text", + "content": "return " + }, + { + "bbox": [ + 285, + 456, + 319, + 464 + ], + "type": "inline_equation", + "content": "S" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 484, + 506, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 484, + 506, + 562 + ], + "spans": [ + { + "bbox": [ + 104, + 484, + 506, + 562 + ], + "type": "text", + "content": "Memory Retrieval. Since the number of memory frames available for conditioning is limited, an efficient strategy is required to sample memory units from the memory bank. We adopt a greedy matching algorithm based on frame-pair similarity, where similarity is defined using the field-of-view (FOV) overlap ratio and timestamp differences as confidence measures. Algorithm 1 presents our approach to memory retrieval. Although simple, this strategy proves effective in retrieving relevant information for conditioning. Moreover, the model's reasoning over memory helps maintain performance even when the retrieved content is imperfect." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 105, + 575, + 265, + 587 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 575, + 265, + 587 + ], + "spans": [ + { + "bbox": [ + 105, + 575, + 265, + 587 + ], + "type": "text", + "content": "3.4 State-aware Memory Condition" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 596, + 506, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 596, + 506, + 673 + ], + "spans": [ + { + "bbox": [ + 104, + 596, + 506, + 673 + ], + "type": "text", + "content": "After retrieving necessary memory units, unlike prior methods that use memory mainly for temporal smoothness (Zheng et al., 2024a) or semantic guidance (Wu et al., 2025b; Rahman et al., 2023), our goal is to explicitly reconstruct previously seen visual content – even under significant viewpoint or scene changes. This requires the model to perform spatiotemporal reasoning to extract relevant information from memory, which we model using cross-attention (Vaswani et al., 2017). Since relying solely on visual tokens can be ambiguous, we incorporate the corresponding states as cues to enable state-aware attention." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": "State Embedding. State embedding provides essential spatial and temporal context for memory retrieval. 
To encode spatial information, we adopt Plücker embedding (Sitzmann et al., 2021) to convert 5D poses " + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\mathbf{p} \\in \\mathbb{R}^5" + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": " into dense positional features " + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\mathrm{PE}(\\mathbf{p}) \\in \\mathbb{R}^{h \\times w \\times 6}" + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": ", following (He et al., 2024; Gao et al., 2024). Temporal context is captured via a lightweight MLP over sinusoidal embedded" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 118, + 76, + 492, + 403 + ], + "blocks": [ + { + "bbox": [ + 118, + 76, + 492, + 403 + ], + "lines": [ + { + "bbox": [ + 118, + 76, + 492, + 403 + ], + "spans": [ + { + "bbox": [ + 118, + 76, + 492, + 403 + ], + "type": "image", + "image_path": "767f4bcd7f8825e3ca7df0605b4a362e6098d0785328a13ad2ac10801d30be44.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 411, + 504, + 467 + ], + "lines": [ + { + "bbox": [ + 104, + 411, + 504, + 467 + ], + "spans": [ + { + "bbox": [ + 104, + 411, + 504, + 467 + ], + "type": "text", + "content": "Figure 3: Qualitative results. We showcase WORLDMEM's capabilities through two sets of examples. Top: A comparison with Ground Truth (GT). WORLDMEM accurately models diverse dynamics (e.g., rain) by conditioning on 600 past frames, ensuring temporal consistency. Bottom: Interaction with the world. Objects like hay in the desert or wheat in the plains persist over time, with wheat visibly growing. For the best experience, see the supplementary videos." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 474, + 330, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 474, + 330, + 486 + ], + "spans": [ + { + "bbox": [ + 105, + 474, + 330, + 486 + ], + "type": "inline_equation", + "content": "(SE)" + }, + { + "bbox": [ + 105, + 474, + 330, + 486 + ], + "type": "text", + "content": " timestamps. 
The final embedding is (Figure 2 (c)):" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 241, + 490, + 504, + 503 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 490, + 504, + 503 + ], + "spans": [ + { + "bbox": [ + 241, + 490, + 504, + 503 + ], + "type": "interline_equation", + "content": "\\mathbf {E} = G _ {p} (\\mathrm {P E} (\\mathbf {p})) + G _ {t} (\\mathrm {S E} (t)), \\tag {3}", + "image_path": "b9bcfbda92d8a3209eb2317a81d6f669a80933add113b0e203523ee7051e8417.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 507, + 394, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 507, + 394, + 519 + ], + "spans": [ + { + "bbox": [ + 104, + 507, + 394, + 519 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 507, + 394, + 519 + ], + "type": "inline_equation", + "content": "G_{p}" + }, + { + "bbox": [ + 104, + 507, + 394, + 519 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 507, + 394, + 519 + ], + "type": "inline_equation", + "content": "G_{t}" + }, + { + "bbox": [ + 104, + 507, + 394, + 519 + ], + "type": "text", + "content": " are MLPs mapping pose and time into a shared space." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 523, + 504, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 523, + 504, + 568 + ], + "spans": [ + { + "bbox": [ + 104, + 523, + 504, + 568 + ], + "type": "text", + "content": "State-aware Memory Attention. To support reconstruction under viewpoint and temporal shifts, we introduce a state-aware attention mechanism that incorporates spatial-temporal cues into memory retrieval. By conditioning attention on both visual features and state information, the model achieves more accurate reasoning between input and memory." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 571, + 504, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 571, + 504, + 606 + ], + "spans": [ + { + "bbox": [ + 104, + 571, + 504, + 606 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 104, + 571, + 504, + 606 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_q\\in \\mathbb{R}^{l_q\\times d}" + }, + { + "bbox": [ + 104, + 571, + 504, + 606 + ], + "type": "text", + "content": " denote the flattened feature map of input frames (queries), and " + }, + { + "bbox": [ + 104, + 571, + 504, + 606 + ], + "type": "inline_equation", + "content": "\\mathbf{X}_k\\in \\mathbb{R}^{l_k\\times d}" + }, + { + "bbox": [ + 104, + 571, + 504, + 606 + ], + "type": "text", + "content": " the concatenated memory features (keys and values). 
We first enrich both with their corresponding state embeddings " + }, + { + "bbox": [ + 104, + 571, + 504, + 606 + ], + "type": "inline_equation", + "content": "\\mathbf{E}_q" + }, + { + "bbox": [ + 104, + 571, + 504, + 606 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 571, + 504, + 606 + ], + "type": "inline_equation", + "content": "\\mathbf{E}_k" + }, + { + "bbox": [ + 104, + 571, + 504, + 606 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 230, + 606, + 504, + 620 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 606, + 504, + 620 + ], + "spans": [ + { + "bbox": [ + 230, + 606, + 504, + 620 + ], + "type": "interline_equation", + "content": "\\tilde {\\mathbf {X}} _ {q} = \\mathbf {X} _ {q} + \\mathbf {E} _ {q}, \\quad \\tilde {\\mathbf {X}} _ {k} = \\mathbf {X} _ {k} + \\mathbf {E} _ {k}. \\tag {4}", + "image_path": "ca8924287f821703821d8bf5d62aecb5fe7159ddc23de5a7e7b4d0f81a1a9737.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 627, + 469, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 627, + 469, + 639 + ], + "spans": [ + { + "bbox": [ + 104, + 627, + 469, + 639 + ], + "type": "text", + "content": "Cross-attention is then applied to retrieve relevant memory content and output updated " + }, + { + "bbox": [ + 104, + 627, + 469, + 639 + ], + "type": "inline_equation", + "content": "\\mathbf{X}^{\\prime}" + }, + { + "bbox": [ + 104, + 627, + 469, + 639 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 180, + 643, + 504, + 658 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 643, + 504, + 658 + ], + "spans": [ + { + "bbox": [ + 180, + 643, + 504, + 658 + ], + "type": "interline_equation", + "content": "\\mathbf {X} ^ {\\prime} = \\operatorname {C r o s s A t t n} (Q = p _ {q} (\\tilde {\\mathbf {X}} _ {q}), K = p _ {k} (\\tilde {\\mathbf {X}} _ {k}), V = p _ {v} (\\mathbf {X} _ {k})), \\tag {5}", + "image_path": "3549a7c6280ea8f465ef040bf31d25e38f052420b7d8d952435c0d665c12cb43.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 662, + 293, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 662, + 293, + 674 + ], + "spans": [ + { + "bbox": [ + 104, + 662, + 293, + 674 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 662, + 293, + 674 + ], + "type": "inline_equation", + "content": "p_q, p_k" + }, + { + "bbox": [ + 104, + 662, + 293, + 674 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 662, + 293, + 674 + ], + "type": "inline_equation", + "content": "p_v" + }, + { + "bbox": [ + 104, + 662, + 293, + 674 + ], + "type": "text", + "content": " are learnable projections." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": "To simplify the reasoning space, we adopt a relative state formulation. For each query frame, the state is set to a zero reference (e.g., the pose is reset to the identity and the timestamp to zero), while the states of key frames are normalized to relative values. This design, illustrated in Figure 2(d), improves alignment under viewpoint changes and simplifies the learning objective." 
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 76, + 298, + 190 + ], + "blocks": [ + { + "bbox": [ + 108, + 76, + 298, + 190 + ], + "lines": [ + { + "bbox": [ + 108, + 76, + 298, + 190 + ], + "spans": [ + { + "bbox": [ + 108, + 76, + 298, + 190 + ], + "type": "image", + "image_path": "55568ec2d9052a84d2f43f5fd983fa65c403765847b1eb321dd4a5371fac8f43.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 195, + 299, + 239 + ], + "lines": [ + { + "bbox": [ + 104, + 195, + 299, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 195, + 299, + 239 + ], + "type": "text", + "content": "Figure 4: Within context window evaluation. The motion sequence involves turning right and returning to the original position, showing self-contained consistency." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 94, + 261, + 261, + 395 + ], + "blocks": [ + { + "bbox": [ + 106, + 243, + 242, + 254 + ], + "lines": [ + { + "bbox": [ + 106, + 243, + 242, + 254 + ], + "spans": [ + { + "bbox": [ + 106, + 243, + 242, + 254 + ], + "type": "text", + "content": "Table 1: Evaluation on Minecraft" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 94, + 261, + 261, + 395 + ], + "lines": [ + { + "bbox": [ + 94, + 261, + 261, + 395 + ], + "spans": [ + { + "bbox": [ + 94, + 261, + 261, + 395 + ], + "type": "table", + "html": "
Within context window
Methods | PSNR ↑ | LPIPS ↓ | rFID ↓
Full Seq. | 14.35 | 0.0691 | 13.87
DF | 20.56 | 0.0094 | 13.88
Ours | 21.01 | 0.0072 | 13.73
Beyond context window
Methods | PSNR ↑ | LPIPS ↓ | rFID ↓
Full Seq. | / | / | /
DF | 18.04 | 0.4376 | 51.28
Ours | 19.32 | 0.1429 | 15.37
", + "image_path": "bdee5425e500778b85a96d164d5855b2c6ff216180af2cfc50a7e625931f6040.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 313, + 74, + 502, + 187 + ], + "blocks": [ + { + "bbox": [ + 313, + 74, + 502, + 187 + ], + "lines": [ + { + "bbox": [ + 313, + 74, + 502, + 187 + ], + "spans": [ + { + "bbox": [ + 313, + 74, + 502, + 187 + ], + "type": "image", + "image_path": "d1e6c276910048364854297556611dd2a45a1eea429cab3c84376a57362243d5.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 310, + 195, + 506, + 240 + ], + "lines": [ + { + "bbox": [ + 310, + 195, + 506, + 240 + ], + "spans": [ + { + "bbox": [ + 310, + 195, + 506, + 240 + ], + "type": "text", + "content": "Figure 5: Beyond context window evaluation. Diffusion-Forcing suffers inconsistency over time, while ours maintains quality and recovers past scenes." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 274, + 261, + 502, + 315 + ], + "blocks": [ + { + "bbox": [ + 284, + 243, + 449, + 255 + ], + "lines": [ + { + "bbox": [ + 284, + 243, + 449, + 255 + ], + "spans": [ + { + "bbox": [ + 284, + 243, + 449, + 255 + ], + "type": "text", + "content": "Table 2: Ablation on embedding designs" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 274, + 261, + 502, + 315 + ], + "lines": [ + { + "bbox": [ + 274, + 261, + 502, + 315 + ], + "spans": [ + { + "bbox": [ + 274, + 261, + 502, + 315 + ], + "type": "table", + "html": "
Pose type | Embed. type | PSNR ↑ | LPIPS ↓ | rFID ↓
Sparse | Absolute | 14.67 | 0.2887 | 39.23
Dense | Absolute | 17.63 | 0.1830 | 29.34
Dense | Relative | 19.32 | 0.1429 | 15.37
", + "image_path": "a2b0f1636dc3ff7c8dfc235adca8ea0a713423b114cd97c50c1377020f680216.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 274, + 341, + 479, + 394 + ], + "blocks": [ + { + "bbox": [ + 274, + 323, + 459, + 335 + ], + "lines": [ + { + "bbox": [ + 274, + 323, + 459, + 335 + ], + "spans": [ + { + "bbox": [ + 274, + 323, + 459, + 335 + ], + "type": "text", + "content": "Table 3: Ablation on memory retrieve strategy" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 274, + 341, + 479, + 394 + ], + "lines": [ + { + "bbox": [ + 274, + 341, + 479, + 394 + ], + "spans": [ + { + "bbox": [ + 274, + 341, + 479, + 394 + ], + "type": "table", + "html": "
Strategy | PSNR ↑ | LPIPS ↓ | rFID ↓
Random | 12.32 | 0.3224 | 47.35
+ Confidence Filter | 17.12 | 0.1863 | 24.33
+ Similarity Filter | 19.32 | 0.1429 | 15.37
", + "image_path": "5be7f40ca0170cf0931349adf148b2063a4d2a61d782d7c6200d3dc6a412a8d7.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 415, + 504, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 415, + 504, + 471 + ], + "spans": [ + { + "bbox": [ + 104, + 415, + 504, + 471 + ], + "type": "text", + "content": "Incorporating memory into pipeline. We incorporate memory frames into the pipeline by treating them as clean inputs during both training and inference. As shown in Figure 2 (a-b), during training, memory frames are assigned the lowest noise level " + }, + { + "bbox": [ + 104, + 415, + 504, + 471 + ], + "type": "inline_equation", + "content": "k_{\\mathrm{min}}" + }, + { + "bbox": [ + 104, + 415, + 504, + 471 + ], + "type": "text", + "content": ", while context window frames receive independently sampled noise levels from the range " + }, + { + "bbox": [ + 104, + 415, + 504, + 471 + ], + "type": "inline_equation", + "content": "[k_{\\mathrm{min}}, k_{\\mathrm{max}}]" + }, + { + "bbox": [ + 104, + 415, + 504, + 471 + ], + "type": "text", + "content": ". During inference, both memory and context frames are assigned " + }, + { + "bbox": [ + 104, + 415, + 504, + 471 + ], + "type": "inline_equation", + "content": "k_{\\mathrm{min}}" + }, + { + "bbox": [ + 104, + 415, + 504, + 471 + ], + "type": "text", + "content": ", while the current generating frames are assigned " + }, + { + "bbox": [ + 104, + 415, + 504, + 471 + ], + "type": "inline_equation", + "content": "k_{\\mathrm{max}}" + }, + { + "bbox": [ + 104, + 415, + 504, + 471 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 475, + 468, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 475, + 468, + 488 + ], + "spans": [ + { + "bbox": [ + 104, + 475, + 468, + 488 + ], + "type": "text", + "content": "To restrict memory influence only to memory blocks, we apply a temporal attention mask:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 224, + 493, + 505, + 533 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 224, + 493, + 505, + 533 + ], + "spans": [ + { + "bbox": [ + 224, + 493, + 505, + 533 + ], + "type": "interline_equation", + "content": "A _ {\\text {m a s k}} (i, j) = \\left\\{ \\begin{array}{l l} 1, & i \\leq L _ {M} \\text {a n d} j = i \\\\ 1, & i > L _ {M} \\text {a n d} j \\leq i \\\\ 0, & \\text {o t h e r w i s e} \\end{array} \\right. \\tag {6}", + "image_path": "051a668f07afe27adca49a42fba69f683663d42d43addef7ff5276c78d55d7e8.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 539, + 504, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 539, + 504, + 562 + ], + "spans": [ + { + "bbox": [ + 104, + 539, + 504, + 562 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 539, + 504, + 562 + ], + "type": "inline_equation", + "content": "L_{M}" + }, + { + "bbox": [ + 104, + 539, + 504, + 562 + ], + "type": "text", + "content": " is the number of memory frames that are appended before frames within the context window. This guarantees causal attention while preventing memory units from affecting each other." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 577, + 192, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 577, + 192, + 590 + ], + "spans": [ + { + "bbox": [ + 105, + 577, + 192, + 590 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 601, + 504, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 601, + 504, + 647 + ], + "spans": [ + { + "bbox": [ + 104, + 601, + 504, + 647 + ], + "type": "text", + "content": "Datasets. We use MineDojo (Fan et al., 2022) to create diverse training and evaluation datasets in Minecraft, configuring diverse environments (e.g., plains, savannas, ice plains, and deserts), agent actions, and interactions. For real-world scenes, we utilize RealEstate10K (Zhou et al., 2018) with camera pose annotations to evaluate long-term world consistency." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 650, + 506, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 650, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 650, + 506, + 696 + ], + "type": "text", + "content": "Metrics. For quantitative evaluation, we employ reconstruction metrics, where the method of obtaining ground truth (GT) varies by specific settings. We then assess the consistency and quality of the generated videos using PSNR, LPIPS (Zhang et al., 2018), and reconstruction FID (rFID) (Heusel et al., 2017), which collectively measure pixel-level fidelity, perceptual similarity, and overall realism." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 699, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 699, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 699, + 505, + 723 + ], + "type": "text", + "content": "Experimental details. For our experiments on Minecraft (Fan et al., 2022), we utilize the Oasis (Decart et al., 2024) as the base model. Our model is trained using the Adam optimizer with a fixed" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 72, + 302, + 198 + ], + "blocks": [ + { + "bbox": [ + 111, + 72, + 302, + 198 + ], + "lines": [ + { + "bbox": [ + 111, + 72, + 302, + 198 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 302, + 198 + ], + "type": "image", + "image_path": "80be7710b7aac22f2f910ef78e2582ba42b65a4d9eacce9bebbb6f7e2b7ed9dd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 200, + 504, + 234 + ], + "lines": [ + { + "bbox": [ + 104, + 200, + 504, + 234 + ], + "spans": [ + { + "bbox": [ + 104, + 200, + 504, + 234 + ], + "type": "text", + "content": "Figure 6: Results on RealEstate (Zhou et al., 2018). We visualize loop closure consistency over a full camera rotation. The visual similarity between the first and last frames serves as a qualitative indicator of 3D spatial consistency." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 308, + 73, + 496, + 198 + ], + "blocks": [ + { + "bbox": [ + 308, + 73, + 496, + 198 + ], + "lines": [ + { + "bbox": [ + 308, + 73, + 496, + 198 + ], + "spans": [ + { + "bbox": [ + 308, + 73, + 496, + 198 + ], + "type": "image", + "image_path": "ddc2586f3345e661f33248b6cc0087c4703090e0712fed894a0128507250e4b5.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 184, + 255, + 424, + 329 + ], + "blocks": [ + { + "bbox": [ + 226, + 244, + 382, + 255 + ], + "lines": [ + { + "bbox": [ + 226, + 244, + 382, + 255 + ], + "spans": [ + { + "bbox": [ + 226, + 244, + 382, + 255 + ], + "type": "text", + "content": "Table 4: Evaluation on RealEstate10K" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 184, + 255, + 424, + 329 + ], + "lines": [ + { + "bbox": [ + 184, + 255, + 424, + 329 + ], + "spans": [ + { + "bbox": [ + 184, + 255, + 424, + 329 + ], + "type": "table", + "html": "
Methods | PSNR ↑ | LPIPS ↓ | rFID ↓
CameraCtrl (He et al., 2024) | 13.19 | 0.3328 | 133.81
TrajAttn (Xiao et al., 2024) | 14.22 | 0.3698 | 128.36
Viewcrafter (Yu et al., 2024c) | 21.72 | 0.1729 | 58.43
DFoT (Song et al., 2025) | 16.42 | 0.2933 | 110.34
Ours | 23.34 | 0.1672 | 43.14
", + "image_path": "aae9a2f9f9ff8ee0fcb633f9a1bd4ffb65580a3167cf48d007a602089fab10cb.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "spans": [ + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "text", + "content": "learning rate of " + }, + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-5}" + }, + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "text", + "content": ". Training is conducted at a resolution of " + }, + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "inline_equation", + "content": "640 \\times 360" + }, + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "text", + "content": ", where frames are first encoded into a latent space via a VAE at a resolution of " + }, + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "inline_equation", + "content": "32 \\times 18" + }, + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "text", + "content": ", then further patchified to " + }, + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "inline_equation", + "content": "16 \\times 9" + }, + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "text", + "content": ". Our training dataset comprises approximately 12K long videos, each containing 1500 frames, generated from Fan et al. (2022). During training, we employ an 8-frame temporal context window alongside an 8-frame memory window. The model is trained for approximately 500K steps using 4 GPUs, with a batch size of 4 per GPU. For the hyperparameters specified in Algorithm 1 of the main paper, we set the similarity threshold " + }, + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "inline_equation", + "content": "tr" + }, + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "text", + "content": " to 0.9, " + }, + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "inline_equation", + "content": "w_{o}" + }, + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "text", + "content": " to 1, and " + }, + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "inline_equation", + "content": "w_{t}" + }, + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "inline_equation", + "content": "0.2 / t_{c}" + }, + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "text", + "content": ". For the noise levels in Eq. (5) and Eq. (6), we set " + }, + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "inline_equation", + "content": "k_{\\min}" + }, + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "text", + "content": " to 15 and " + }, + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "inline_equation", + "content": "k_{\\max}" + }, + { + "bbox": [ + 104, + 337, + 506, + 426 + ], + "type": "text", + "content": " to 1000." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 430, + 504, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 430, + 504, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 430, + 504, + 475 + ], + "type": "text", + "content": "For our experiments on RealEstate10K (Zhou et al., 2018), we adopt DFoT (Song et al., 2025) as the base model. The RealEstate10K dataset provides a training set of approximately 65K short video clips. 
Training is conducted at a resolution of " + }, + { + "bbox": [ + 104, + 430, + 504, + 475 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 104, + 430, + 504, + 475 + ], + "type": "text", + "content": ", with frames patched to " + }, + { + "bbox": [ + 104, + 430, + 504, + 475 + ], + "type": "inline_equation", + "content": "128 \\times 128" + }, + { + "bbox": [ + 104, + 430, + 504, + 475 + ], + "type": "text", + "content": ". The model is trained for approximately 50K steps using 4 GPUs, with a batch size of 8 per GPU." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 488, + 278, + 499 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 488, + 278, + 499 + ], + "spans": [ + { + "bbox": [ + 105, + 488, + 278, + 499 + ], + "type": "text", + "content": "4.1 Results on Generation Benchmark" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 508, + 506, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 508, + 506, + 629 + ], + "spans": [ + { + "bbox": [ + 104, + 508, + 506, + 629 + ], + "type": "text", + "content": "Comparisons on Minecraft Benchmark. We compare our approach with a standard full-sequence (Full Seq.) training method (He et al., 2024; Wang et al., 2024) and Diffusion Forcing (DF) (Chen et al., 2025). The key differences are as follows: the full-sequence conditional diffusion transformer (Peebles and Xie, 2023) maintains the same noise level during training and inference, DF introduces different noise levels for training and inference, and our method incorporates a memory mechanism. To assess both short-term and long-term world consistency, we conduct evaluations within and beyond the context window. We evaluate both settings on 300 test videos. In the following experiments, the agent's poses are generated by the game simulator as ground truth. However, in real-world scenarios, only the action input is available, and the pose is not directly observable. In such cases, the next-frame pose can be predicted based on the previous scenes, past states, and the upcoming action. We explore this design choice in the supplementary material." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "content": "Within context window. For this experiment, all methods use a context window of 16, while our approach additionally maintains a memory window of 8. We test on customized motion scenarios (e.g., turn left, then turn right or move forward, then backward) to assess self-contained consistency, where the ground truth consists of previously generated frames at the same positions. As shown in Table 1 and Figure 4, the full-sequence baseline suffers from inconsistencies even within its own context window. DF improves consistency by enabling greater information exchange among generated frames. Our memory-based approach achieves the best performance, demonstrating the effectiveness of integrating a dedicated memory mechanism." 
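For quick reference, the experimental details above can be gathered into a single configuration object. This is only an illustrative sketch: the `TrainConfig` dataclass and its field names are hypothetical, while the values are the ones reported for the Minecraft (Oasis-based) and RealEstate10K (DFoT-based) setups.

```python
from dataclasses import dataclass
from typing import Optional, Tuple

@dataclass
class TrainConfig:
    """Hypothetical container; field names are ours, values as reported above."""
    lr: float = 2e-5                                   # Adam, fixed learning rate
    steps: int = 500_000
    gpus: int = 4
    batch_per_gpu: int = 4
    resolution: Tuple[int, int] = (640, 360)           # frame resolution
    latent_grid: Optional[Tuple[int, int]] = (32, 18)  # after VAE encoding
    patch_grid: Tuple[int, int] = (16, 9)              # after patchification
    context_window: int = 8                            # temporal context frames
    memory_window: int = 8                             # memory frames per step
    similarity_threshold: float = 0.9                  # tr in Algorithm 1
    w_o: float = 1.0
    w_t_coeff: float = 0.2                             # w_t = 0.2 / t_c
    k_min: int = 15                                    # noise levels in Eq. (5) / Eq. (6)
    k_max: int = 1000

minecraft = TrainConfig()         # Oasis base model (Decart et al., 2024)
realestate = TrainConfig(         # DFoT base model (Song et al., 2025)
    steps=50_000,
    batch_per_gpu=8,
    resolution=(256, 256),
    patch_grid=(128, 128),
    latent_grid=None,             # latent grid not reported for this setup
)
```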
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 197, + 89, + 411, + 145 + ], + "blocks": [ + { + "bbox": [ + 201, + 77, + 409, + 89 + ], + "lines": [ + { + "bbox": [ + 201, + 77, + 409, + 89 + ], + "spans": [ + { + "bbox": [ + 201, + 77, + 409, + 89 + ], + "type": "text", + "content": "Table 5: Ablation on sampling strategy for training" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 197, + 89, + 411, + 145 + ], + "lines": [ + { + "bbox": [ + 197, + 89, + 411, + 145 + ], + "spans": [ + { + "bbox": [ + 197, + 89, + 411, + 145 + ], + "type": "table", + "html": "
<table><tr><td>Sampling strategy</td><td>PSNR ↑</td><td>LPIPS ↓</td><td>rFID ↓</td></tr>
<tr><td>Small-range</td><td>13.23</td><td>0.3786</td><td>46.55</td></tr>
<tr><td>Large-range</td><td>15.11</td><td>0.3855</td><td>42.96</td></tr>
<tr><td>Progressive</td><td>19.32</td><td>0.1429</td><td>15.37</td></tr></table>
", + "image_path": "9e63e34502d19e1a9660587fe1b448ca4cbb22eb6399eea90032701c88909b36.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 151, + 506, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 151, + 506, + 240 + ], + "spans": [ + { + "bbox": [ + 104, + 151, + 506, + 240 + ], + "type": "text", + "content": "Beyond context window. In this setting, all methods use a context window of 8 and generate 100 future frames; our method further employs a memory window of 8 while initializing a 600-frame memory bank. We compute the reconstruction error using the subsequent 100 ground truth frames after 600 frames. Full-sequence methods can not roll out that long so we exclude it. DF exhibits poor PSNR and LPIPS scores, indicating severe inconsistency with the ground truth beyond the context window. Additionally, its low rFID suggests notable quality degradation. In contrast, our memory-augmented approach consistently outperforms others across all metrics, demonstrating superior long-term consistency and quality preservation. Figure 5 further substantiates these findings." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 243, + 504, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 243, + 504, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 504, + 310 + ], + "type": "text", + "content": "Figure 3 showcases WORLDMEM's capabilities. The top section demonstrates its ability to operate in a free action space across diverse environments. Given a 600-frame memory bank, our model generates 100 future frames while preserving the ground truth's actions and poses, ensuring strong world consistency. The bottom section highlights dynamic environment interaction. By using timestamps as embeddings, the model remembers environmental changes and captures natural event evolution, such as plant growth over time." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 315, + 506, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 315, + 506, + 370 + ], + "spans": [ + { + "bbox": [ + 104, + 315, + 506, + 370 + ], + "type": "text", + "content": "Comparisons on Real Scenarios. We compare our method with prior works (He et al., 2024; Xiao et al., 2024; Yu et al., 2024c; Song et al., 2025) on the RealEstate10K dataset (Zhou et al., 2018). We design 5 evaluation trajectories, each starting and ending at the same pose, across 100 scenes. The trajectory lengths range from 37 to 60 frames – exceeding the training lengths of all baselines (maximum 25 frames)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 374, + 504, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 374, + 504, + 453 + ], + "spans": [ + { + "bbox": [ + 104, + 374, + 504, + 453 + ], + "type": "text", + "content": "CameraCtrl (He et al., 2024), TrajAttn (Xiao et al., 2024), and DFoT (Song et al., 2025) discard past frames and suffer from inconsistency. Viewcrafter (Yu et al., 2024c) incorporates explicit 3D reconstruction, yielding better results, but is constrained by errors in post-processing such as reconstruction and rendering. As shown in Table 4 and Figure 6, our approach achieves superior performance across all metrics. However, the RealEstate dataset inherently limits the full potential of our method, as it consists of short, non-interactive clips with limited temporal complexity. 
We leave evaluation under more challenging and interactive real-world scenarios for future work." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 466, + 168, + 477 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 466, + 168, + 477 + ], + "spans": [ + { + "bbox": [ + 105, + 466, + 168, + 477 + ], + "type": "text", + "content": "4.2 Ablation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 487, + 504, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 487, + 504, + 575 + ], + "spans": [ + { + "bbox": [ + 104, + 487, + 504, + 575 + ], + "type": "text", + "content": "**Embedding designs.** The design of embeddings within the memory block is crucial for cross-frame relationship modeling. We evaluate three strategies (Table 2): (1) sparse pose embedding with absolute encoding, (2) dense pose embedding with absolute encoding, and (3) dense pose embedding with relative encoding. Results show that dense pose embeddings (Plücker embedding) significantly enhance all metrics, emphasizing the benefits of richer pose representations. Switching from absolute to relative encoding further improves performance, particularly in LPIPS and rFID, by facilitating relationship reasoning and information retrieval. As illustrated in Figure 7, absolute embeddings accumulate errors over time, while relative embeddings maintain stability even beyond 300 frames." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 579, + 504, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 579, + 504, + 635 + ], + "spans": [ + { + "bbox": [ + 104, + 579, + 504, + 635 + ], + "type": "text", + "content": "Sampling strategy for training. We compare different sampling strategies during training in the Minecraft benchmark. Small-range sampling restricts memory conditioning to frames within " + }, + { + "bbox": [ + 104, + 579, + 504, + 635 + ], + "type": "inline_equation", + "content": "2\\mathrm{m}" + }, + { + "bbox": [ + 104, + 579, + 504, + 635 + ], + "type": "text", + "content": " in the Minecraft world, while large-range sampling extends this range to " + }, + { + "bbox": [ + 104, + 579, + 504, + 635 + ], + "type": "inline_equation", + "content": "8\\mathrm{m}" + }, + { + "bbox": [ + 104, + 579, + 504, + 635 + ], + "type": "text", + "content": ". Progressive sampling, on the other hand, begins with small-range samples for initial training steps and then gradually expands to large-range samples." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 639, + 504, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 639, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 104, + 639, + 504, + 685 + ], + "type": "text", + "content": "As shown in Table 5, both small-range and large-range sampling struggle with consistency and quality, whereas progressive sampling significantly improves all metrics. This suggests that gradually increasing difficulty during training helps the model learn to reason and effectively query information from memory blocks." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": "Time condition. We ablate the effectiveness of the timestamp condition (for both embedding and retrieval) in Table 6. 
We curate 100 video samples featuring placing events and evaluate whether future generations align with event progression. As shown in the table, incorporating the time" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 74, + 293, + 186 + ], + "blocks": [ + { + "bbox": [ + 111, + 74, + 293, + 186 + ], + "lines": [ + { + "bbox": [ + 111, + 74, + 293, + 186 + ], + "spans": [ + { + "bbox": [ + 111, + 74, + 293, + 186 + ], + "type": "image", + "image_path": "c891de3c6e4cfb733593f322c26a2b2245c57e9ed2ba6ef2a8161dce2e9e97c1.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 190, + 301, + 312 + ], + "lines": [ + { + "bbox": [ + 104, + 190, + 301, + 312 + ], + "spans": [ + { + "bbox": [ + 104, + 190, + 301, + 312 + ], + "type": "text", + "content": "Figure 7: Long-term Generation Comparison. This figure presents the PSNR of different ablation methods compared to the ground truth over a 300-frame sequence. The results show that our method without memory blocks or using random memory retrieval exhibits immediate inconsistencies with the ground truth. Additionally, the model lacking relative embeddings begins to degrade significantly beyond 100 frames. In contrast, our full method maintains strong consistency even beyond 300 frames." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 313, + 79, + 503, + 140 + ], + "blocks": [ + { + "bbox": [ + 313, + 79, + 503, + 140 + ], + "lines": [ + { + "bbox": [ + 313, + 79, + 503, + 140 + ], + "spans": [ + { + "bbox": [ + 313, + 79, + 503, + 140 + ], + "type": "image", + "image_path": "6d83deba8dc1fb557d72f7e206ad8763aaf1db95ce734260003174404ea4cd47.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 310, + 146, + 506, + 213 + ], + "lines": [ + { + "bbox": [ + 310, + 146, + 506, + 213 + ], + "spans": [ + { + "bbox": [ + 310, + 146, + 506, + 213 + ], + "type": "text", + "content": "Figure 8: Results w/o and w/ time condition. Without timestamps, the model fails to differentiate memory units from the same location at different times, causing errors. With time conditioning, it aligns with the updated world state, ensuring consistency." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 313, + 260, + 503, + 304 + ], + "blocks": [ + { + "bbox": [ + 334, + 243, + 481, + 254 + ], + "lines": [ + { + "bbox": [ + 334, + 243, + 481, + 254 + ], + "spans": [ + { + "bbox": [ + 334, + 243, + 481, + 254 + ], + "type": "text", + "content": "Table 6: Ablation on time condition" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 313, + 260, + 503, + 304 + ], + "lines": [ + { + "bbox": [ + 313, + 260, + 503, + 304 + ], + "spans": [ + { + "bbox": [ + 313, + 260, + 503, + 304 + ], + "type": "table", + "html": "
<table><tr><td>Time condition</td><td>PSNR ↑</td><td>LPIPS ↓</td><td>rFID ↓</td></tr>
<tr><td>w/o</td><td>17.17</td><td>0.1989</td><td>23.89</td></tr>
<tr><td>w/</td><td>19.12</td><td>0.1613</td><td>16.53</td></tr></table>
", + "image_path": "30d17f42dbaa4ca8d8b12815ea604146efbf347f4ae14367a292fdb24ea2af4b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 326, + 504, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 326, + 504, + 371 + ], + "spans": [ + { + "bbox": [ + 104, + 326, + 504, + 371 + ], + "type": "text", + "content": "condition significantly improves PSNR and LPIPS, indicating that adding temporal information helps the model faithfully reproduce event changes in world simulation. Since events like plant growth are inherently unpredictable, we do not conduct quantitative evaluations on such cases but instead provide qualitative illustrations in Figure 8." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 376, + 504, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 376, + 504, + 441 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 504, + 441 + ], + "type": "text", + "content": "Memory retrieve strategy. We analyze memory retrieval strategies in Table 3. Random sampling from the memory bank leads to poor performance and severe quality degradation, as evidenced by a sharp drop in rFID and rapid divergence from the ground truth (Figure 7). The confidence-based filtering significantly enhances consistency and generation quality. Additionally, we refine retrieval by filtering out redundant memory units based on similarity, further improving all evaluation metrics and demonstrating the effectiveness of our approach." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 460, + 279, + 472 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 460, + 279, + 472 + ], + "spans": [ + { + "bbox": [ + 105, + 460, + 279, + 472 + ], + "type": "text", + "content": "5 Limitations and Future works" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 486, + 504, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 486, + 504, + 563 + ], + "spans": [ + { + "bbox": [ + 104, + 486, + 504, + 563 + ], + "type": "text", + "content": "Despite the effectiveness of our approach, certain issues warrant further exploration. First, we cannot guarantee that we can always retrieve all necessary information from the memory bank In some corner cases (e.g., when views are blocked by obstacles), relying solely on view overlap may be insufficient. Second, our current interaction with the environment lacks diversity and realism. In future work, we plan to extend our models to real-world scenarios with more realistic and varied interactions. Lastly, our memory design still entails linearly increasing memory usage, which may impose limitations when handling extremely long sequences." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 581, + 183, + 594 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 581, + 183, + 594 + ], + "spans": [ + { + "bbox": [ + 105, + 581, + 183, + 594 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 607, + 504, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 607, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 104, + 607, + 504, + 685 + ], + "type": "text", + "content": "In conclusion, WOrLDMEM tackles the longstanding challenge of maintaining long-term consistency in world simulation by employing a memory bank of past frames and associated states. 
Its memory attention mechanism enables accurate reconstruction of previously observed scenes, even under large viewpoints or temporal gaps, and effectively models dynamic changes over time. Extensive experiments in both virtual and real settings confirm WOrLDMEM's capacity for robust, immersive world simulation. We hope our work will encourage further research on the design and applications of memory-based world simulators." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 689, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 506, + 723 + ], + "type": "text", + "content": "Acknowledgements. This research is supported by the National Research Foundation, Singapore, under its NRF Fellowship Award . This research is also supported by NTU SUG-NAP, as well as cash and in-kind funding from NTU S-Lab and industry partner(s)." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 89, + 507, + 721 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 105, + 89, + 507, + 121 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 89, + 507, + 121 + ], + "spans": [ + { + "bbox": [ + 105, + 89, + 507, + 121 + ], + "type": "text", + "content": "Eloi Alonso, Adam Jelley, Vincent Micheli, Anssi Kanervisto, Amos J Storkey, Tim Pearce, and François Fleuret. Diffusion for world modeling: Visual details matter in atari. Advances in Neural Information Processing Systems, 37:58757-58791, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 127, + 485, + 140 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 127, + 485, + 140 + ], + "spans": [ + { + "bbox": [ + 107, + 127, + 485, + 140 + ], + "type": "text", + "content": "Amir Bar, Gaoyue Zhou, Danny Tran, Trevor Darrell, and Yann LeCun. Navigation world models, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 146, + 506, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 146, + 506, + 176 + ], + "spans": [ + { + "bbox": [ + 106, + 146, + 506, + 176 + ], + "type": "text", + "content": "Charles Beattie, Joel Z Leibo, Denis Teplyashin, Tom Ward, Marcus Wainwright, Heinrich Kuttler, Andrew Lefrancq, Simon Green, Víctor Valdés, Amir Sadik, et al. Deepmind lab. arXiv preprint arXiv:1612.03801, 2016." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 184, + 504, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 184, + 504, + 215 + ], + "spans": [ + { + "bbox": [ + 106, + 184, + 504, + 215 + ], + "type": "text", + "content": "Boyuan Chen, Diego Martí Monsó, Yilun Du, Max Simchowitz, Russ Tedrake, and Vincent Sitzmann. Diffusion forcing: Next-token prediction meets full-sequence diffusion. 
Advances in Neural Information Processing Systems, 37:24081-24125, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 223, + 506, + 254 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 223, + 506, + 254 + ], + "spans": [ + { + "bbox": [ + 105, + 223, + 506, + 254 + ], + "type": "text", + "content": "Haoxin Chen, Menghan Xia, Yingqing He, Yong Zhang, Xiaodong Cun, Shaoshu Yang, Jinbo Xing, Yaofang Liu, Qifeng Chen, Xintao Wang, et al. Videocraft1: Open diffusion models for high-quality video generation. arXiv preprint arXiv:2310.19512, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 261, + 504, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 261, + 504, + 282 + ], + "spans": [ + { + "bbox": [ + 107, + 261, + 504, + 282 + ], + "type": "text", + "content": "Decart, Julian Quevedo, Quinn McIntyre, Spruce Campbell, Xinlei Chen, and Robert Wachen. Oasis: A universe in a transformer. 2024. Project website." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 289, + 506, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 289, + 506, + 320 + ], + "spans": [ + { + "bbox": [ + 105, + 289, + 506, + 320 + ], + "type": "text", + "content": "Linxi Fan, Guanzhi Wang, Yunfan Jiang, Ajay Mandlekar, Yuncong Yang, Haoyi Zhu, Andrew Tang, DeAn Huang, Yuke Zhu, and Anima Anandkumar. Minedojo: Building open-ended embodied agents with internet-scale knowledge. Advances in Neural Information Processing Systems, 35:18343-18362, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 328, + 505, + 359 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 328, + 505, + 359 + ], + "spans": [ + { + "bbox": [ + 107, + 328, + 505, + 359 + ], + "type": "text", + "content": "Ruili Feng, Han Zhang, Zhantao Yang, Jie Xiao, Zhilei Shu, Zhiheng Liu, Andy Zheng, Yukun Huang, Yu Liu, and Hongyang Zhang. The matrix: Infinite-horizon world generation with real-time moving control. arXiv preprint arXiv:2412.03568, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 366, + 505, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 366, + 505, + 398 + ], + "spans": [ + { + "bbox": [ + 107, + 366, + 505, + 398 + ], + "type": "text", + "content": "Ruiqi Gao, Aleksander Holynski, Philipp Henzler, Arthur Brussee, Ricardo Martin-Brualla, Pratul Srinivasan, Jonathan T Barron, and Ben Poole. Cat3d: Create anything in 3d with multi-view diffusion models. arXiv preprint arXiv:2405.10314, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 405, + 506, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 405, + 506, + 436 + ], + "spans": [ + { + "bbox": [ + 107, + 405, + 506, + 436 + ], + "type": "text", + "content": "Yuwei Guo, Ceyuan Yang, Anyi Rao, Zhengyang Liang, Yaohui Wang, Yu Qiao, Maneesh Agrawala, Dahua Lin, and Bo Dai. Animatediff: Animate your personalized text-to-image diffusion models without specific tuning. arXiv preprint arXiv:2307.04725, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 443, + 504, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 443, + 504, + 465 + ], + "spans": [ + { + "bbox": [ + 107, + 443, + 504, + 465 + ], + "type": "text", + "content": "David Ha and Jürgen Schmidhuber. Recurrent world models facilitate policy evolution. Advances in neural information processing systems, 31, 2018a." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 472, + 443, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 472, + 443, + 483 + ], + "spans": [ + { + "bbox": [ + 107, + 472, + 443, + 483 + ], + "type": "text", + "content": "David Ha and Jürgen Schmidhuber. World models. arXiv preprint arXiv:1803.10122, 2018b." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 491, + 504, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 491, + 504, + 511 + ], + "spans": [ + { + "bbox": [ + 107, + 491, + 504, + 511 + ], + "type": "text", + "content": "Danijar Hafner, Timothy Lillicrap, Jimmy Ba, and Mohammad Norouzi. Dream to control: Learning behaviors by latent imagination. arXiv preprint arXiv:1912.01603, 2019." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 519, + 504, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 519, + 504, + 540 + ], + "spans": [ + { + "bbox": [ + 107, + 519, + 504, + 540 + ], + "type": "text", + "content": "Danijar Hafner, Timothy Lillicrap, Mohammad Norouzi, and Jimmy Ba. Mastering atari with discrete world models. arXiv preprint arXiv:2010.02193, 2020." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 547, + 505, + 568 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 547, + 505, + 568 + ], + "spans": [ + { + "bbox": [ + 107, + 547, + 505, + 568 + ], + "type": "text", + "content": "Hao He, Yinghao Xu, Yuwei Guo, Gordon Wetzstein, Bo Dai, Hongsheng Li, and Ceyuan Yang. Cameractrol: Enabling camera control for text-to-video generation. arXiv preprint arXiv:2404.02101, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 575, + 504, + 606 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 575, + 504, + 606 + ], + "spans": [ + { + "bbox": [ + 107, + 575, + 504, + 606 + ], + "type": "text", + "content": "Roberto Henschel, Levon Khachatryan, Daniil Hayrapetyan, Hayk Poghosyan, Vahram Tadevosyan, Zhangyang Wang, Shant Navasardyan, and Humphrey Shi. Streamingt2v: Consistent, dynamic, and extendable long video generation from text. arXiv preprint arXiv:2403.14773, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 614, + 504, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 614, + 504, + 645 + ], + "spans": [ + { + "bbox": [ + 107, + 614, + 504, + 645 + ], + "type": "text", + "content": "Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 652, + 504, + 683 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 652, + 504, + 683 + ], + "spans": [ + { + "bbox": [ + 107, + 652, + 504, + 683 + ], + "type": "text", + "content": "Yining Hong, Beide Liu, Maxine Wu, Yuanhao Zhai, Kai-Wei Chang, Linjie Li, Kevin Lin, Chung-Ching Lin, Jianfeng Wang, Zhengyuan Yang, Ying Nian Wu, and Lijuan Wang Wang. Slowfast-vgen: Slow-fast learning for action-driven long video generation. arXiv preprint arXiv:2410.23277, 2024." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 107, + 691, + 506, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 691, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 107, + 691, + 506, + 721 + ], + "type": "text", + "content": "Anthony Hu, Lloyd Russell, Hudson Yeo, Zak Murez, George Fedoseev, Alex Kendall, Jamie Shotton, and Gianluca Corrado. Gaia-1: A generative world model for autonomous driving. arXiv preprint arXiv:2309.17080, 2023." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 722 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "text", + "content": "Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuzhhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. *ICLR*, 1(2):3, 2022." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 102, + 506, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 102, + 506, + 133 + ], + "spans": [ + { + "bbox": [ + 106, + 102, + 506, + 133 + ], + "type": "text", + "content": "Hanwen Jiang, Hao Tan, Peng Wang, Haian Jin, Yue Zhao, Sai Bi, Kai Zhang, Fujun Luan, Kalyan Sunkavalli, Qixing Huang, et al. Rayzer: A self-supervised large view synthesis model. arXiv preprint arXiv:2505.00702, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 141, + 505, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 141, + 505, + 172 + ], + "spans": [ + { + "bbox": [ + 107, + 141, + 505, + 172 + ], + "type": "text", + "content": "Yang Jin, Zhicheng Sun, Ningyuan Li, Kun Xu, Hao Jiang, Nan Zhuang, Quzhe Huang, Yang Song, Yadong Mu, and Zhouchen Lin. Pyramidal flow matching for efficient video generative modeling. arXiv preprint arXiv:2410.05954, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 180, + 505, + 211 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 180, + 505, + 211 + ], + "spans": [ + { + "bbox": [ + 106, + 180, + 505, + 211 + ], + "type": "text", + "content": "Jihwan Kim, Junoh Kang, Jinyoung Choi, and Bohyung Han. FIFO-diffusion: Generating infinite videos from text without training. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 220, + 505, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 220, + 505, + 281 + ], + "spans": [ + { + "bbox": [ + 106, + 220, + 505, + 281 + ], + "type": "text", + "content": "Dan Kondratyuk, Lijun Yu, Xiuye Gu, José Lezama, Jonathan Huang, Grant Schindler, Rachel Hornung, Vighnesh Birodkar, Jimmy Yan, Ming-Chang Chiu, Krishna Somandepalli, Hassan Akbari, Yair Alon, Yong Cheng, Josh Dillon, Agrim Gupta, Meera Hahn, Anja Hauth, David Hendon, Alonso Martinez, David Minnen, Mikhail Sirotenko, Kihyuk Sohn, Xuan Yang, Hartwig Adam, Ming-Hsuan Yang, Irfan Essa, Huisheng Wang, David A. Ross, Bryan Seybold, and Lu Jiang. Videopoet: A large language model for zero-shot video generation, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 289, + 505, + 320 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 289, + 505, + 320 + ], + "spans": [ + { + "bbox": [ + 106, + 289, + 505, + 320 + ], + "type": "text", + "content": "Hanwen Liang, Junli Cao, Vidit Goel, Guocheng Qian, Sergei Korolev, Demetri Terzopoulos, Konstantinos N Plataniotis, Sergey Tulyakov, and Jian Ren. Wonderland: Navigating 3d scenes from a single image. arXiv preprint arXiv:2412.12091, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 328, + 505, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 328, + 505, + 358 + ], + "spans": [ + { + "bbox": [ + 106, + 328, + 505, + 358 + ], + "type": "text", + "content": "Fangfu Liu, Wenqiang Sun, Hanyang Wang, Yikai Wang, Haowen Sun, Junliang Ye, Jun Zhang, and Yueqi Duan. Reconx: Reconstruct any scene from sparse views with video diffusion model. arXiv preprint arXiv:2408.16767, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 367, + 505, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 367, + 505, + 388 + ], + "spans": [ + { + "bbox": [ + 107, + 367, + 505, + 388 + ], + "type": "text", + "content": "Xin Ma, Yaohui Wang, Gengyun Jia, Xinyuan Chen, Ziwei Liu, Yuan-Fang Li, Cunjian Chen, and Yu Qiao. Latte: Latent diffusion transformer for video generation. arXiv preprint arXiv:2401.03048, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 396, + 505, + 418 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 396, + 505, + 418 + ], + "spans": [ + { + "bbox": [ + 106, + 396, + 505, + 418 + ], + "type": "text", + "content": "OpenAI. Video generation models as world simulators. https://openai.com/research/video-generation-models-as-world-simulators, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 426, + 505, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 426, + 505, + 486 + ], + "spans": [ + { + "bbox": [ + 106, + 426, + 505, + 486 + ], + "type": "text", + "content": "Jack Parker-Holder, Philip Ball, Jake Bruce, Vibhavari Dasagi, Kristian Holsheimer, Christos Kaplanis, Alexandre Moufarek, Guy Scully, Jeremy Shar, Jimmy Shi, Stephen Spencer, Jessica Yung, Michael Dennis, Sultan Kenjeyev, Shangbang Long, Vlad Mnih, Harris Chan, Maxime Gazeau, Bonnie Li, Fabio Pardo, Luyu Wang, Lei Zhang, Frederic Besse, Tim Harley, Anna Mitenkova, Jane Wang, Jeff Clune, Demis Hassabis, Raia Hadsell, Adrian Bolton, Satinder Singh, and Tim Roktaschel. Genie 2: A large-scale foundation world model. 2024." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 495, + 505, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 495, + 505, + 517 + ], + "spans": [ + { + "bbox": [ + 106, + 495, + 505, + 517 + ], + "type": "text", + "content": "William Peebles and Saining Xie. Scalable diffusion models with transformers. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4195-4205, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 524, + 505, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 524, + 505, + 555 + ], + "spans": [ + { + "bbox": [ + 106, + 524, + 505, + 555 + ], + "type": "text", + "content": "Tanzila Rahman, Hsin-Ying Lee, Jian Ren, Sergey Tulyakov, Shweta Mahajan, and Leonid Sigal. Make-a-story: Visual memory conditioned consistent story generation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 2493-2502, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 563, + 505, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 563, + 505, + 604 + ], + "spans": [ + { + "bbox": [ + 106, + 563, + 505, + 604 + ], + "type": "text", + "content": "Xuanchi Ren, Tianchang Shen, Jiahui Huang, Huan Ling, Yifan Lu, Merlin Nimier-David, Thomas Müller, Alexander Keller, Sanja Fidler, and Jun Gao. Gen3c: 3d-informed world-consistent video generation with precise camera control. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 612, + 505, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 612, + 505, + 654 + ], + "spans": [ + { + "bbox": [ + 106, + 612, + 505, + 654 + ], + "type": "text", + "content": "Mehdi SM Sajjadi, Henning Meyer, Etienne Pot, Urs Bergmann, Klaus Greff, Noha Radwan, Suhani Vora, Mario Lucic, Daniel Duckworth, Alexey Dosovitskiy, et al. Scene representation transformer: Geometry-free novel view synthesis through set-latent scene representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6229-6238, 2022." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 662, + 505, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 662, + 505, + 693 + ], + "spans": [ + { + "bbox": [ + 106, + 662, + 505, + 693 + ], + "type": "text", + "content": "Vincent Sitzmann, Semon Rezchikov, Bill Freeman, Josh Tenenbaum, and Fredo Durand. Light field networks: Neural scene representations with single-evaluation rendering. Advances in Neural Information Processing Systems, 34:19313-19325, 2021." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 700, + 505, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 700, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 700, + 505, + 722 + ], + "type": "text", + "content": "Kiwhan Song, Boyuan Chen, Max Simchowitz, Yilun Du, Russ Tedrake, and Vincent Sitzmann. History-guided video diffusion. arXiv preprint arXiv:2502.06764, 2025." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 507, + 722 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 106, + 72, + 507, + 103 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 507, + 103 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 507, + 103 + ], + "type": "text", + "content": "Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456, 2020." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 111, + 505, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 111, + 505, + 133 + ], + "spans": [ + { + "bbox": [ + 106, + 111, + 505, + 133 + ], + "type": "text", + "content": "Dani Valevski, Yaniv Leviathan, Moab Arar, and Shlomi Fruchter. Diffusion models are real-time game engines. arXiv preprint arXiv:2408.14837, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 140, + 506, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 140, + 506, + 162 + ], + "spans": [ + { + "bbox": [ + 107, + 140, + 506, + 162 + ], + "type": "text", + "content": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need, 2017." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 169, + 505, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 169, + 505, + 190 + ], + "spans": [ + { + "bbox": [ + 107, + 169, + 505, + 190 + ], + "type": "text", + "content": "Hengyi Wang and Lourdes Agapito. 3d reconstruction with spatial memory. arXiv preprint arXiv:2408.16061, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 198, + 504, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 198, + 504, + 220 + ], + "spans": [ + { + "bbox": [ + 107, + 198, + 504, + 220 + ], + "type": "text", + "content": "Jiuniu Wang, Hangjie Yuan, Dayou Chen, Yingya Zhang, Xiang Wang, and Shiwei Zhang. Modelscope text-to-video technical report. arXiv preprint arXiv:2308.06571, 2023a." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 227, + 504, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 227, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 107, + 227, + 504, + 258 + ], + "type": "text", + "content": "Jing Wang, Fengzhuo Zhang, Xiaoli Li, Vincent YF Tan, Tianyu Pang, Chao Du, Aixin Sun, and Zhuoran Yang. Error analyses of auto-regressive video diffusion models: A unified framework. arXiv preprint arXiv:2503.10704, 2025." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 266, + 504, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 266, + 504, + 297 + ], + "spans": [ + { + "bbox": [ + 107, + 266, + 504, + 297 + ], + "type": "text", + "content": "Yaohui Wang, Xinyuan Chen, Xin Ma, Shangchen Zhou, Ziqi Huang, Yi Wang, Ceyuan Yang, Yinan He, Jiashuo Yu, Peiqing Yang, et al. Lavie: High-quality video generation with cascaded latent diffusion models. arXiv preprint arXiv:2309.15103, 2023b." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 304, + 504, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 304, + 504, + 336 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 504, + 336 + ], + "type": "text", + "content": "Zhouxia Wang, Ziyang Yuan, Xintao Wang, Yaowei Li, Tianshui Chen, Menghan Xia, Ping Luo, and Ying Shan. Motionctrl: A unified and flexible motion controller for video generation. In ACM SIGGRAPH 2024 Conference Papers, pages 1-11, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 343, + 504, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 343, + 504, + 365 + ], + "spans": [ + { + "bbox": [ + 107, + 343, + 504, + 365 + ], + "type": "text", + "content": "Sibo Wu, Congrong Xu, Binbin Huang, Andreas Geiger, and Anpei Chen. Genfusion: Closing the loop between reconstruction and generation via videos. arXiv preprint arXiv:2503.21219, 2025a." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 373, + 505, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 373, + 505, + 403 + ], + "spans": [ + { + "bbox": [ + 107, + 373, + 505, + 403 + ], + "type": "text", + "content": "Tong Wu, Zhihao Fan, Xiao Liu, Yeyun Gong, Yelong Shen, Jian Jiao, Hai-Tao Zheng, Juntao Li, Zhongyu Wei, Jian Guo, Nan Duan, and Weizhu Chen. Ar-diffusion: Auto-regressive diffusion model for text generation, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 411, + 504, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 411, + 504, + 443 + ], + "spans": [ + { + "bbox": [ + 107, + 411, + 504, + 443 + ], + "type": "text", + "content": "Xindi Wu, Uriel Singer, Zhaojiang Lin, Andrea Madotto, Xide Xia, Yifan Xu, Paul Crook, Xin Luna Dong, and Seungwhan Moon. Corgi: Cached memory guided video generation. In 2025 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV), pages 4585-4594. IEEE, 2025b." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 450, + 504, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 450, + 504, + 472 + ], + "spans": [ + { + "bbox": [ + 107, + 450, + 504, + 472 + ], + "type": "text", + "content": "Zeqi Xiao, Wenqi Ouyang, Yifan Zhou, Shuai Yang, Lei Yang, Jianlou Si, and Xingang Pan. Trajectory attention for fine-grained video motion control. arXiv preprint arXiv:2411.19324, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 479, + 504, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 479, + 504, + 501 + ], + "spans": [ + { + "bbox": [ + 107, + 479, + 504, + 501 + ], + "type": "text", + "content": "Jingjing Xu, Xu Sun, Zhiyuan Zhang, Guangxiang Zhao, and Junyang Lin. Understanding and improving layer normalization. Advances in neural information processing systems, 32, 2019." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 508, + 505, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 508, + 505, + 529 + ], + "spans": [ + { + "bbox": [ + 107, + 508, + 505, + 529 + ], + "type": "text", + "content": "Mengjiao Yang, Yilun Du, Kamyar Ghasemipour, Jonathan Tompson, Dale Schuurmans, and Pieter Abbeel. Learning interactive real-world simulators. arXiv preprint arXiv:2310.06114, 1(2):6, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 536, + 505, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 536, + 505, + 559 + ], + "spans": [ + { + "bbox": [ + 107, + 536, + 505, + 559 + ], + "type": "text", + "content": "Tianwei Yin, Qiang Zhang, Richard Zhang, William T Freeman, Fredo Durand, Eli Shechtman, and Xun Huang. From slow bidirectional to fast causal video generators. arXiv preprint arXiv:2412.07772, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 565, + 504, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 565, + 504, + 587 + ], + "spans": [ + { + "bbox": [ + 107, + 565, + 504, + 587 + ], + "type": "text", + "content": "Hong-Xing Yu, Haoyi Duan, Charles Herrmann, William T Freeman, and Jiajun Wu. Wonderworld: Interactive 3d scene generation from a single image. arXiv preprint arXiv:2406.09394, 2024a." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 594, + 505, + 634 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 594, + 505, + 634 + ], + "spans": [ + { + "bbox": [ + 107, + 594, + 505, + 634 + ], + "type": "text", + "content": "Hong-Xing Yu, Haoyi Duan, Junhwa Hur, Kyle Sargent, Michael Rubinstein, William T Freeman, Forrester Cole, Deqing Sun, Noah Snavely, Jiajun Wu, et al. Wonderjourney: Going from anywhere to everywhere. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6658-6667, 2024b." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 107, + 643, + 505, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 643, + 505, + 665 + ], + "spans": [ + { + "bbox": [ + 107, + 643, + 505, + 665 + ], + "type": "text", + "content": "Jiwen Yu, Yiran Qin, Haoxuan Che, Quande Liu, Xintao Wang, Pengfei Wan, Di Zhang, Kun Gai, Hao Chen, and Xihui Liu. A survey of interactive generative video. arXiv preprint arXiv:2504.21853, 2025a." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 107, + 672, + 505, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 672, + 505, + 693 + ], + "spans": [ + { + "bbox": [ + 107, + 672, + 505, + 693 + ], + "type": "text", + "content": "Jiwen Yu, Yiran Qin, Haoxuan Che, Quande Liu, Xintao Wang, Pengfei Wan, Di Zhang, and Xihui Liu. Position: Interactive generative video as next-generation game engine. arXiv preprint arXiv:2503.17359, 2025b." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 107, + 700, + 504, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 700, + 504, + 722 + ], + "spans": [ + { + "bbox": [ + 107, + 700, + 504, + 722 + ], + "type": "text", + "content": "Jiwen Yu, Yiran Qin, Xintao Wang, Pengfei Wan, Di Zhang, and Xihui Liu. Gamefactory: Creating new games with generative interactive videos. arXiv preprint arXiv:2501.08325, 2025c." 
+ } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 236 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 103 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 103 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 103 + ], + "type": "text", + "content": "Wangbo Yu, Jinbo Xing, Li Yuan, Wenbo Hu, Xiaoyu Li, Zhipeng Huang, Xiangjun Gao, Tien-Tsin Wong, Ying Shan, and Yonghong Tian. Viewcrafter: Taming video diffusion models for high-fidelity novel view synthesis. arXiv preprint arXiv:2409.02048, 2024c." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 110, + 505, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 110, + 505, + 142 + ], + "spans": [ + { + "bbox": [ + 106, + 110, + 505, + 142 + ], + "type": "text", + "content": "Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 586-595, 2018." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 148, + 505, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 148, + 505, + 178 + ], + "spans": [ + { + "bbox": [ + 106, + 148, + 505, + 178 + ], + "type": "text", + "content": "Longtao Zheng, Yifan Zhang, Hanzhong Guo, Jiachun Pan, Zhenxiong Tan, Jiahao Lu, Chuanxin Tang, Bo An, and Shuicheng Yan. Memo: Memory-guided diffusion for expressive talking video generation. arXiv preprint arXiv:2412.04448, 2024a." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 186, + 505, + 208 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 186, + 505, + 208 + ], + "spans": [ + { + "bbox": [ + 106, + 186, + 505, + 208 + ], + "type": "text", + "content": "Zangwei Zheng, Xiangyu Peng, Tianji Yang, Chenhui Shen, Shenggui Li, Hongxin Liu, Yukun Zhou, Tianyi Li, and Yang You. Open-sora: Democratizing efficient video production for all, 2024b." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 214, + 505, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 214, + 505, + 236 + ], + "spans": [ + { + "bbox": [ + 106, + 214, + 505, + 236 + ], + "type": "text", + "content": "Tinghui Zhou, Richard Tucker, John Flynn, Graham Fyffe, and Noah Snavely. Stereo magnification: Learning view synthesis using multiplane images. In SIGGRAPH, 2018." 
+ } + ] + } + ], + "index": 4 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 257, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 257, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 257, + 85 + ], + "type": "text", + "content": "7 Supplementary Materials" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 95, + 236, + 108 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 95, + 236, + 108 + ], + "spans": [ + { + "bbox": [ + 105, + 95, + 236, + 108 + ], + "type": "text", + "content": "7.1 Details and Experiments" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 116, + 506, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 116, + 506, + 150 + ], + "spans": [ + { + "bbox": [ + 104, + 116, + 506, + 150 + ], + "type": "text", + "content": "**Embedding designs.** We present the detailed designs of embeddings for timesteps, actions, poses, and timestamps in Figure 10, where " + }, + { + "bbox": [ + 104, + 116, + 506, + 150 + ], + "type": "inline_equation", + "content": "F, C, H, W, A" + }, + { + "bbox": [ + 104, + 116, + 506, + 150 + ], + "type": "text", + "content": " denote the frame number, channel count, height, width, and action count, respectively." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 154, + 504, + 176 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 154, + 504, + 176 + ], + "spans": [ + { + "bbox": [ + 104, + 154, + 504, + 176 + ], + "type": "text", + "content": "The input pose is parameterized by position " + }, + { + "bbox": [ + 104, + 154, + 504, + 176 + ], + "type": "inline_equation", + "content": "(x,z,y)" + }, + { + "bbox": [ + 104, + 154, + 504, + 176 + ], + "type": "text", + "content": " and orientation (pitch " + }, + { + "bbox": [ + 104, + 154, + 504, + 176 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 154, + 504, + 176 + ], + "type": "text", + "content": " and yaw " + }, + { + "bbox": [ + 104, + 154, + 504, + 176 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 154, + 504, + 176 + ], + "type": "text", + "content": "). 
The extrinsic matrix " + }, + { + "bbox": [ + 104, + 154, + 504, + 176 + ], + "type": "inline_equation", + "content": "\\mathbf{T} \\in \\mathbb{R}^{4 \\times 4}" + }, + { + "bbox": [ + 104, + 154, + 504, + 176 + ], + "type": "text", + "content": " is formed as:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 270, + 180, + 505, + 208 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 270, + 180, + 505, + 208 + ], + "spans": [ + { + "bbox": [ + 270, + 180, + 505, + 208 + ], + "type": "interline_equation", + "content": "\\mathbf {T} = \\left[ \\begin{array}{l l} \\mathbf {R} _ {c} & \\mathbf {c} \\\\ \\mathbf {0} ^ {T} & 1 \\end{array} \\right], \\tag {7}", + "image_path": "d5a5ea0b7abdf3a603a31e858eb811914a167bca52860a3c526c94862c02db14.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 213, + 294, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 213, + 294, + 228 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 294, + 228 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 213, + 294, + 228 + ], + "type": "inline_equation", + "content": "\\mathbf{c} = (x,z,y)^T" + }, + { + "bbox": [ + 104, + 213, + 294, + 228 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 213, + 294, + 228 + ], + "type": "inline_equation", + "content": "\\mathbf{R}_c = \\mathbf{R}_y(\\phi)\\mathbf{R}_x(\\theta)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 231, + 504, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 231, + 504, + 252 + ], + "spans": [ + { + "bbox": [ + 104, + 231, + 504, + 252 + ], + "type": "text", + "content": "To encode camera pose, we adopt the Plücker embedding. Given a pixel " + }, + { + "bbox": [ + 104, + 231, + 504, + 252 + ], + "type": "inline_equation", + "content": "(u,v)" + }, + { + "bbox": [ + 104, + 231, + 504, + 252 + ], + "type": "text", + "content": " with normalized camera coordinates:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 258, + 252, + 505, + 266 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 252, + 505, + 266 + ], + "spans": [ + { + "bbox": [ + 258, + 252, + 505, + 266 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\pi} _ {u v} = \\mathbf {K} ^ {- 1} [ u, v, 1 ] ^ {T}, \\tag {8}", + "image_path": "7e60e4ac83e5a8851f5a56840f7cc4b18e041198f40aa3ce69ed935d26ae78dc.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 269, + 193, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 269, + 193, + 280 + ], + "spans": [ + { + "bbox": [ + 105, + 269, + 193, + 280 + ], + "type": "text", + "content": "its world direction is:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 263, + 280, + 505, + 293 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 263, + 280, + 505, + 293 + ], + "spans": [ + { + "bbox": [ + 263, + 280, + 505, + 293 + ], + "type": "interline_equation", + "content": "\\mathbf {d} _ {u v} = \\mathbf {R} _ {c} \\boldsymbol {\\pi} _ {u v} + \\mathbf {c}. 
\\tag {9}", + "image_path": "5019318d9bedd2a41eeab53f93ae9a8dc4075660cc387a33e5ac9e4fd4af8336.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 296, + 215, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 296, + 215, + 308 + ], + "spans": [ + { + "bbox": [ + 105, + 296, + 215, + 308 + ], + "type": "text", + "content": "The Plücker embedding is:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 246, + 306, + 505, + 320 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 306, + 505, + 320 + ], + "spans": [ + { + "bbox": [ + 246, + 306, + 505, + 320 + ], + "type": "interline_equation", + "content": "\\mathbf {l} _ {u v} = \\left(\\mathbf {c} \\times \\mathbf {d} _ {u v}, \\mathbf {d} _ {u v}\\right) \\in \\mathbb {R} ^ {6}. \\tag {10}", + "image_path": "ff7c8d9812eb871f60dc03ba6cb00e8a26be809bd3b3c043a26142ca10e90a7e.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 322, + 310, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 322, + 310, + 335 + ], + "spans": [ + { + "bbox": [ + 104, + 322, + 310, + 335 + ], + "type": "text", + "content": "For a frame of size " + }, + { + "bbox": [ + 104, + 322, + 310, + 335 + ], + "type": "inline_equation", + "content": "H \\times W" + }, + { + "bbox": [ + 104, + 322, + 310, + 335 + ], + "type": "text", + "content": ", the full embedding is:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 270, + 340, + 505, + 354 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 270, + 340, + 505, + 354 + ], + "spans": [ + { + "bbox": [ + 270, + 340, + 505, + 354 + ], + "type": "interline_equation", + "content": "\\mathbf {L} _ {i} \\in \\mathbb {R} ^ {H \\times W \\times 6}. \\tag {11}", + "image_path": "137374daba42cc29ee3827d4a155e71a28fcefdc5271bea03e2c5223a0b3ef72.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 365, + 505, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 365, + 505, + 422 + ], + "spans": [ + { + "bbox": [ + 104, + 365, + 505, + 422 + ], + "type": "text", + "content": "Memory context length. We evaluate how different memory context lengths affect performance in the Minecraft benchmark. Table 7 shows that increasing the context length from 1 to 8 steadily boosts PSNR, lowers LPIPS, and reduces rFID. However, extending the length to 16 deteriorates results, indicating that excessive memory frames may introduce noise or reduce retrieval precision. A context length of 8 provides the best trade-off, yielding the highest PSNR and the lowest LPIPS and rFID." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 426, + 506, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 426, + 506, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 426, + 506, + 536 + ], + "type": "text", + "content": "Pose prediction. For interactive play, ground truth poses are not accessible. To address this, we designed a lightweight pose prediction module that estimates the pose of the next frame. As illustrated in Figure 9, the predictor takes the previous image, the previous pose, and the upcoming action as inputs and outputs the predicted next pose. This module enables the system to operate using actions alone, eliminating the need for ground truth poses during inference. In Table 8, we compare the performance of using predicted poses versus ground truth poses. 
While using ground truth poses yields better results across all metrics, the performance drop with predicted poses is acceptable. This is because our method does not rely heavily on precise pose predictions – new frames are generated based on these predictions – and the ground truth poses generated by the Minecraft simulator also contain a certain degree of randomness." + } + ] + } + ], + "index": 15 + }, + { + "type": "table", + "bbox": [ + 220, + 566, + 391, + 632 + ], + "blocks": [ + { + "bbox": [ + 195, + 553, + 414, + 565 + ], + "lines": [ + { + "bbox": [ + 195, + 553, + 414, + 565 + ], + "spans": [ + { + "bbox": [ + 195, + 553, + 414, + 565 + ], + "type": "text", + "content": "Table 7: Ablation on length of memory context length" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 220, + 566, + 391, + 632 + ], + "lines": [ + { + "bbox": [ + 220, + 566, + 391, + 632 + ], + "spans": [ + { + "bbox": [ + 220, + 566, + 391, + 632 + ], + "type": "table", + "html": "
<table><tr><td>Length</td><td>PSNR ↑</td><td>LPIPS ↓</td><td>rFID ↓</td></tr>
<tr><td>1</td><td>16.18</td><td>0.1899</td><td>20.47</td></tr>
<tr><td>4</td><td>18.68</td><td>0.1568</td><td>16.54</td></tr>
<tr><td>8</td><td>19.32</td><td>0.1429</td><td>15.37</td></tr>
<tr><td>16</td><td>17.14</td><td>0.1687</td><td>18.33</td></tr></table>
", + "image_path": "f0004364140102367ad4d60778876a7c4c25fdc4c20dc2f01eae89736c3023e5.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_body" + } + ], + "index": 17 + }, + { + "type": "table", + "bbox": [ + 208, + 669, + 403, + 714 + ], + "blocks": [ + { + "bbox": [ + 153, + 656, + 457, + 668 + ], + "lines": [ + { + "bbox": [ + 153, + 656, + 457, + 668 + ], + "spans": [ + { + "bbox": [ + 153, + 656, + 457, + 668 + ], + "type": "text", + "content": "Table 8: Comparison between using predicted poses and ground truth poses" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 208, + 669, + 403, + 714 + ], + "lines": [ + { + "bbox": [ + 208, + 669, + 403, + 714 + ], + "spans": [ + { + "bbox": [ + 208, + 669, + 403, + 714 + ], + "type": "table", + "html": "
<table><tr><td>Pose Type</td><td>PSNR ↑</td><td>LPIPS ↓</td><td>rFID ↓</td></tr>
<tr><td>Ground truth</td><td>19.32</td><td>0.1429</td><td>15.37</td></tr>
<tr><td>Predicted</td><td>17.13</td><td>0.1786</td><td>20.36</td></tr></table>
", + "image_path": "fa911a9e797ad09c23c99f30039cff030ba8c0a685c01c115f0232972453d2aa.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "table_body" + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 212, + 78, + 399, + 203 + ], + "blocks": [ + { + "bbox": [ + 212, + 78, + 399, + 203 + ], + "lines": [ + { + "bbox": [ + 212, + 78, + 399, + 203 + ], + "spans": [ + { + "bbox": [ + 212, + 78, + 399, + 203 + ], + "type": "image", + "image_path": "43ccd54139ef24f20c1aefc610fed777c3dd8ace9ca8755f9903a916ced4749f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 229, + 213, + 380, + 225 + ], + "lines": [ + { + "bbox": [ + 229, + 213, + 380, + 225 + ], + "spans": [ + { + "bbox": [ + 229, + 213, + 380, + 225 + ], + "type": "text", + "content": "Figure 9: Structure of pose predictor." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 212, + 246, + 394, + 371 + ], + "blocks": [ + { + "bbox": [ + 212, + 246, + 394, + 371 + ], + "lines": [ + { + "bbox": [ + 212, + 246, + 394, + 371 + ], + "spans": [ + { + "bbox": [ + 212, + 246, + 394, + 371 + ], + "type": "image", + "image_path": "33a6d94605ecfe71ff82a31473937beb82fd235cd731a85bcb700378ff2ddd3a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 212, + 379, + 291, + 390 + ], + "lines": [ + { + "bbox": [ + 212, + 379, + 291, + 390 + ], + "spans": [ + { + "bbox": [ + 212, + 379, + 291, + 390 + ], + "type": "text", + "content": "(a) Timestep embedding" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 318, + 380, + 390, + 390 + ], + "lines": [ + { + "bbox": [ + 318, + 380, + 390, + 390 + ], + "spans": [ + { + "bbox": [ + 318, + 380, + 390, + 390 + ], + "type": "text", + "content": "(b) Action embedding" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 213, + 411, + 394, + 544 + ], + "blocks": [ + { + "bbox": [ + 213, + 411, + 394, + 544 + ], + "lines": [ + { + "bbox": [ + 213, + 411, + 394, + 544 + ], + "spans": [ + { + "bbox": [ + 213, + 411, + 394, + 544 + ], + "type": "image", + "image_path": "87369e340a68364c85e6e43c777c9d3474916f9a9513dff25ee3cb2472787016.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 219, + 552, + 284, + 562 + ], + "lines": [ + { + "bbox": [ + 219, + 552, + 284, + 562 + ], + "spans": [ + { + "bbox": [ + 219, + 552, + 284, + 562 + ], + "type": "text", + "content": "(c) Pose embedding" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 312, + 552, + 397, + 562 + ], + "lines": [ + { + "bbox": [ + 312, + 552, + 397, + 562 + ], + "spans": [ + { + "bbox": [ + 312, + 552, + 397, + 562 + ], + "type": "text", + "content": "(d) Timestamp embedding" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 208, + 574, + 400, + 586 + ], + "lines": [ + { + "bbox": [ + 208, + 574, + 400, + 586 + ], + "spans": [ + 
{ + "bbox": [ + 208, + 574, + 400, + 586 + ], + "type": "text", + "content": "Figure 10: Illustration of different embeddings." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 608, + 300, + 620 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 608, + 300, + 620 + ], + "spans": [ + { + "bbox": [ + 105, + 608, + 300, + 620 + ], + "type": "text", + "content": "7.2 Memory Usage and Scalability Analysis" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 628, + 504, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 628, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 628, + 504, + 651 + ], + "type": "text", + "content": "To assess the scalability and practical feasibility of our method, we provide detailed quantitative analysis covering memory usage, generation duration, training cost, and inference efficiency." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 664, + 504, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 664, + 504, + 688 + ], + "spans": [ + { + "bbox": [ + 104, + 664, + 504, + 688 + ], + "type": "text", + "content": "Memory Usage of the Memory Bank. The memory bank is lightweight. Storing 600 visual memory tokens with shape [600, 16, 18, 32] in float32 takes approximately 21MB." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": "Retrieval Latency. Below we report the average retrieval time (for 8 memory frames) as a function of memory bank size:" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 208, + 77, + 399, + 208 + ], + "blocks": [ + { + "bbox": [ + 208, + 77, + 399, + 208 + ], + "lines": [ + { + "bbox": [ + 208, + 77, + 399, + 208 + ], + "spans": [ + { + "bbox": [ + 208, + 77, + 399, + 208 + ], + "type": "image", + "image_path": "54085ce17ba039df16122eec09ce0693f531d932564155b51c6ddc1fd60662ac.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 197, + 214, + 411, + 227 + ], + "lines": [ + { + "bbox": [ + 197, + 214, + 411, + 227 + ], + "spans": [ + { + "bbox": [ + 197, + 214, + 411, + 227 + ], + "type": "text", + "content": "Figure 11: Two-view FOV overlapping visualization." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 192, + 247, + 418, + 316 + ], + "blocks": [ + { + "bbox": [ + 192, + 247, + 418, + 316 + ], + "lines": [ + { + "bbox": [ + 192, + 247, + 418, + 316 + ], + "spans": [ + { + "bbox": [ + 192, + 247, + 418, + 316 + ], + "type": "table", + "html": "
<table><tr><td>Number of Memory Candidates</td><td>Retrieval Time (s)</td></tr>
<tr><td>10</td><td>0.04</td></tr>
<tr><td>100</td><td>0.06</td></tr>
<tr><td>600</td><td>0.10</td></tr>
<tr><td>1000</td><td>0.16</td></tr></table>
", + "image_path": "311186cc1d836831fefdca576808fd26c822e109d2ddfda303da4ad7c48f137b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 326, + 504, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 326, + 504, + 348 + ], + "spans": [ + { + "bbox": [ + 104, + 326, + 504, + 348 + ], + "type": "text", + "content": "The generation cost (20 denoising steps) is " + }, + { + "bbox": [ + 104, + 326, + 504, + 348 + ], + "type": "inline_equation", + "content": "\\sim 0.9" + }, + { + "bbox": [ + 104, + 326, + 504, + 348 + ], + "type": "text", + "content": "s per frame. Retrieval time accounts for only " + }, + { + "bbox": [ + 104, + 326, + 504, + 348 + ], + "type": "inline_equation", + "content": "10 - 20\\%" + }, + { + "bbox": [ + 104, + 326, + 504, + 348 + ], + "type": "text", + "content": " of total inference time even with 1000 candidates." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 361, + 505, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 361, + 505, + 396 + ], + "spans": [ + { + "bbox": [ + 104, + 361, + 505, + 396 + ], + "type": "text", + "content": "Comparison with Baseline. We compare our method with a baseline model (without memory), under consistent settings: 8 context frames, 8 memory frames, 20 denoising steps, and no acceleration techniques, on single H200." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 149, + 407, + 461, + 464 + ], + "blocks": [ + { + "bbox": [ + 149, + 407, + 461, + 464 + ], + "lines": [ + { + "bbox": [ + 149, + 407, + 461, + 464 + ], + "spans": [ + { + "bbox": [ + 149, + 407, + 461, + 464 + ], + "type": "table", + "html": "
<table><tr><td rowspan=\"2\">Method</td><td colspan=\"2\">Training</td><td colspan=\"2\">Inference</td></tr>
<tr><td>Mem. Usage</td><td>Speed (it/s)</td><td>Mem. Usage</td><td>Speed (it/s)</td></tr>
<tr><td>w/o Memory</td><td>33 GB</td><td>3.19</td><td>9 GB</td><td>1.03</td></tr>
<tr><td>with Memory</td><td>51 GB</td><td>1.76</td><td>11 GB</td><td>0.89</td></tr></table>
", + "image_path": "9491f18c075d1aadae6c839aeb0789004bda41f81162625333e60688614b9348.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 474, + 504, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 474, + 504, + 497 + ], + "spans": [ + { + "bbox": [ + 104, + 474, + 504, + 497 + ], + "type": "text", + "content": "Adding memory introduces moderate training overhead. During inference, the impact is minimal: only a small increase in memory usage and a slight decrease in speed." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 510, + 505, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 510, + 505, + 533 + ], + "spans": [ + { + "bbox": [ + 104, + 510, + 505, + 533 + ], + "type": "text", + "content": "Inference Optimization. With modern acceleration techniques (e.g., timestep distillation, early exit, sparse attention), inference speed can reach " + }, + { + "bbox": [ + 104, + 510, + 505, + 533 + ], + "type": "inline_equation", + "content": "\\sim 10" + }, + { + "bbox": [ + 104, + 510, + 505, + 533 + ], + "type": "text", + "content": " FPS, making our method practical for deployment." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 536, + 504, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 536, + 504, + 561 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 504, + 561 + ], + "type": "text", + "content": "FOV Overlapping Computation. We present the details of Monte Carlo-based FOV overlapping computation in Alg. 11, as well as the two-view overlapping sampling in Figure 11." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 574, + 190, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 574, + 190, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 574, + 190, + 586 + ], + "type": "text", + "content": "7.3 Visualizations" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 596, + 477, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 596, + 477, + 609 + ], + "spans": [ + { + "bbox": [ + 104, + 596, + 477, + 609 + ], + "type": "text", + "content": "In this section, we provide more visualization of different aspects to facilitate understanding." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 612, + 504, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 612, + 504, + 646 + ], + "spans": [ + { + "bbox": [ + 104, + 612, + 504, + 646 + ], + "type": "text", + "content": "Minecraft Training Examples. We present a diverse set of training environments that include various terrain types, action spaces, and weather conditions, as shown in Figure 12. These variations help enhance the model's adaptability and robustness in different scenarios." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 650, + 504, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 650, + 504, + 684 + ], + "spans": [ + { + "bbox": [ + 104, + 650, + 504, + 684 + ], + "type": "text", + "content": "Trajectory Examples in Minecraft. Figure 13 illustrates trajectory examples in the x-z space over 100 frames. The agent's movement exhibits a random action pattern, ensuring diverse learning objectives and a broad range of sampled experiences." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": "Pose Distribution. We collect and visualize 800 samples within a sampling range of 8, as shown in Figure 14. The random pattern observed in Figure 14 ensures a diverse distribution of sampled poses in space, which is beneficial for learning the reasoning process within the memory blocks." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 75, + 447, + 87 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 75, + 447, + 87 + ], + "spans": [ + { + "bbox": [ + 105, + 75, + 447, + 87 + ], + "type": "text", + "content": "Algorithm 2: Monte Carlo-based FOV Overlap Computation (Notationally Disjoint)" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 89, + 135, + 100 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 89, + 135, + 100 + ], + "spans": [ + { + "bbox": [ + 106, + 89, + 135, + 100 + ], + "type": "text", + "content": "Input:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 132, + 99, + 496, + 182 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 132, + 99, + 496, + 122 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 99, + 496, + 122 + ], + "spans": [ + { + "bbox": [ + 132, + 99, + 496, + 122 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 132, + 99, + 496, + 122 + ], + "type": "inline_equation", + "content": "Q_{\\mathrm{ref}} \\in \\mathbb{R}^{F \\times 5}" + }, + { + "bbox": [ + 132, + 99, + 496, + 122 + ], + "type": "text", + "content": ": reference poses from memory bank (x,y,z,pitch,yaw), " + }, + { + "bbox": [ + 132, + 99, + 496, + 122 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 132, + 99, + 496, + 122 + ], + "type": "text", + "content": " is the number of stored poses." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 125, + 326, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 125, + 326, + 137 + ], + "spans": [ + { + "bbox": [ + 132, + 125, + 326, + 137 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 132, + 125, + 326, + 137 + ], + "type": "inline_equation", + "content": "Q_{\\mathrm{tgt}} \\in \\mathbb{R}^5" + }, + { + "bbox": [ + 132, + 125, + 326, + 137 + ], + "type": "text", + "content": ": pose of the current (target) frame." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 140, + 343, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 140, + 343, + 152 + ], + "spans": [ + { + "bbox": [ + 132, + 140, + 343, + 152 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 132, + 140, + 343, + 152 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 132, + 140, + 343, + 152 + ], + "type": "text", + "content": ": number of 3D sample points (default 10,000)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 156, + 335, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 156, + 335, + 167 + ], + "spans": [ + { + "bbox": [ + 132, + 156, + 335, + 167 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 132, + 156, + 335, + 167 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 132, + 156, + 335, + 167 + ], + "type": "text", + "content": ": radius of the sampling sphere (default " + }, + { + "bbox": [ + 132, + 156, + 335, + 167 + ], + "type": "inline_equation", + "content": "30\\mathrm{m}" + }, + { + "bbox": [ + 132, + 156, + 335, + 167 + ], + "type": "text", + "content": ")." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 171, + 384, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 171, + 384, + 182 + ], + "spans": [ + { + "bbox": [ + 132, + 171, + 384, + 182 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 132, + 171, + 384, + 182 + ], + "type": "inline_equation", + "content": "\\phi_h" + }, + { + "bbox": [ + 132, + 171, + 384, + 182 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 132, + 171, + 384, + 182 + ], + "type": "inline_equation", + "content": "\\phi_v" + }, + { + "bbox": [ + 132, + 171, + 384, + 182 + ], + "type": "text", + "content": ": horizontal/vertical field-of-view angles (in degrees)." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 186, + 143, + 197 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 186, + 143, + 197 + ], + "spans": [ + { + "bbox": [ + 105, + 186, + 143, + 197 + ], + "type": "text", + "content": "Output:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 132, + 199, + 450, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 199, + 450, + 213 + ], + "spans": [ + { + "bbox": [ + 132, + 199, + 450, + 213 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 132, + 199, + 450, + 213 + ], + "type": "inline_equation", + "content": "\\rho \\in \\mathbb{R}^F" + }, + { + "bbox": [ + 132, + 199, + 450, + 213 + ], + "type": "text", + "content": ": overlapping ratios between each reference pose and the target pose." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 216, + 132, + 226 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 216, + 132, + 226 + ], + "spans": [ + { + "bbox": [ + 105, + 216, + 132, + 226 + ], + "type": "text", + "content": "begin" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 120, + 225, + 296, + 236 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 225, + 296, + 236 + ], + "spans": [ + { + "bbox": [ + 120, + 225, + 296, + 236 + ], + "type": "inline_equation", + "content": "\\triangle" + }, + { + "bbox": [ + 120, + 225, + 296, + 236 + ], + "type": "text", + "content": " Step 1: Random Sampling in a Sphere" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 120, + 236, + 361, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 236, + 361, + 247 + ], + "spans": [ + { + "bbox": [ + 120, + 236, + 361, + 247 + ], + "type": "text", + "content": "Generate " + }, + { + "bbox": [ + 120, + 236, + 361, + 247 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 120, + 236, + 361, + 247 + ], + "type": "text", + "content": " points " + }, + { + "bbox": [ + 120, + 236, + 361, + 247 + ], + "type": "inline_equation", + "content": "\\mathbf{q}" + }, + { + "bbox": [ + 120, + 236, + 361, + 247 + ], + "type": "text", + "content": " uniformly in a 3D sphere of radius " + }, + { + "bbox": [ + 120, + 236, + 361, + 247 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 120, + 236, + 361, + 247 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 242, + 253, + 372, + 266 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 253, + 372, + 266 + ], + "spans": [ + { + "bbox": [ + 242, + 253, + 372, + 266 + ], + "type": "interline_equation", + "content": "\\mathbf {q} \\leftarrow \\text{PointSampling}(M, R).", + "image_path": "c138eeacdb33bbe74ed7ef0bd75d5384412bddd857298e5c09d61013a7190e19.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 119, + 271, + 311, + 284 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 271, + 311, + 284 + ], + "spans": [ + { + "bbox": [ + 119, + 271, + 311, + 284 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 119, + 271, + 311, + 284 + ], + "type": "text", + "content": " Step 2: Translate Points to " + }, + { + "bbox": [ + 119, + 271, + 311, + 284 + ], + "type": "inline_equation", + "content": "Q_{\\mathrm{tgt}}" + }, + { + "bbox": [ + 119, + 271, + 311, + 284 + ], + "type": "text", + "content": " as Center" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 119, + 283, + 490, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 283, + 490, + 295 + ], + "spans": [ + { + "bbox": [ + 119, + 283, + 490, + 295 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 119, + 283, + 490, + 295 + ], + "type": "inline_equation", + "content": "Q_{\\mathrm{tgt}}(x,y,z)" + }, + { + "bbox": [ + 119, + 283, + 490, + 295 + ], + "type": "text", + "content": " be the 3D coordinates of the current camera pose. 
Shift all sampled points:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 255, + 300, + 358, + 313 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 300, + 358, + 313 + ], + "spans": [ + { + "bbox": [ + 255, + 300, + 358, + 313 + ], + "type": "interline_equation", + "content": "\\mathbf {q} \\leftarrow \\mathbf {q} + Q _ {\\mathrm {t g t}} (x, y, z).", + "image_path": "adad793cee2cc8f4ff35518d948a07a8d8ddce810d0efbfd57b5ddbd631ca21e.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 120, + 318, + 220, + 330 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 318, + 220, + 330 + ], + "spans": [ + { + "bbox": [ + 120, + 318, + 220, + 330 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 120, + 318, + 220, + 330 + ], + "type": "text", + "content": " Step 3: FOV Checks" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 119, + 328, + 490, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 328, + 490, + 353 + ], + "spans": [ + { + "bbox": [ + 119, + 328, + 490, + 353 + ], + "type": "text", + "content": "Compute a boolean matrix " + }, + { + "bbox": [ + 119, + 328, + 490, + 353 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_{\\mathrm{ref}} \\in \\{0,1\\}^{F \\times M}" + }, + { + "bbox": [ + 119, + 328, + 490, + 353 + ], + "type": "text", + "content": ", where each entry indicates if a point in " + }, + { + "bbox": [ + 119, + 328, + 490, + 353 + ], + "type": "inline_equation", + "content": "\\mathbf{q}" + }, + { + "bbox": [ + 119, + 328, + 490, + 353 + ], + "type": "text", + "content": " lies in the FOV of a reference pose:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 222, + 357, + 392, + 379 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 357, + 392, + 379 + ], + "spans": [ + { + "bbox": [ + 222, + 357, + 392, + 379 + ], + "type": "interline_equation", + "content": "\\mathbf {v} _ {\\mathrm {r e f}} \\leftarrow \\operatorname {I s I n s i d e F O V} \\big (\\mathbf {q}, Q _ {\\mathrm {r e f}}, \\phi_ {h}, \\phi_ {v} \\big).", + "image_path": "faac491d8ffa58fdb88b64c55cd0ac817d3a060c561d4c7658041b8daa232b65.jpg" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 125, + 384, + 414, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 384, + 414, + 398 + ], + "spans": [ + { + "bbox": [ + 125, + 384, + 414, + 398 + ], + "type": "text", + "content": "Similarly, compute a boolean vector " + }, + { + "bbox": [ + 125, + 384, + 414, + 398 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_{\\mathrm{tg}} \\in \\{0,1\\}^{M}" + }, + { + "bbox": [ + 125, + 384, + 414, + 398 + ], + "type": "text", + "content": " for the target pose:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 222, + 403, + 392, + 424 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 403, + 392, + 424 + ], + "spans": [ + { + "bbox": [ + 222, + 403, + 392, + 424 + ], + "type": "interline_equation", + "content": "\\mathbf {v} _ {\\mathrm {t g t}} \\leftarrow \\operatorname {I s I n s i d e F O V} \\big (\\mathbf {q}, Q _ {\\mathrm {t g t}}, \\phi_ {h}, \\phi_ {v} \\big).", + "image_path": "d7f7be95b011155ed7b2564d69783c24b25e5ffb775447a6f51ee1f3ba9ab8fb.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 120, + 428, + 304, + 440 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 428, + 304, + 440 + ], + "spans": [ + { + 
"bbox": [ + 120, + 428, + 304, + 440 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 120, + 428, + 304, + 440 + ], + "type": "text", + "content": " Step 4: Overlapping Ratio Computation" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 120, + 439, + 487, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 439, + 487, + 452 + ], + "spans": [ + { + "bbox": [ + 120, + 439, + 487, + 452 + ], + "type": "text", + "content": "Obtain the final overlapping ratio vector " + }, + { + "bbox": [ + 120, + 439, + 487, + 452 + ], + "type": "inline_equation", + "content": "\\pmb {\\rho}\\in \\mathbb{R}^{F}" + }, + { + "bbox": [ + 120, + 439, + 487, + 452 + ], + "type": "text", + "content": " by combining " + }, + { + "bbox": [ + 120, + 439, + 487, + 452 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_{\\mathrm{ref}}" + }, + { + "bbox": [ + 120, + 439, + 487, + 452 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 120, + 439, + 487, + 452 + ], + "type": "inline_equation", + "content": "\\mathbf{v}_{\\mathrm{tgt}}" + }, + { + "bbox": [ + 120, + 439, + 487, + 452 + ], + "type": "text", + "content": ". For instance," + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 233, + 458, + 381, + 492 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 458, + 381, + 492 + ], + "spans": [ + { + "bbox": [ + 233, + 458, + 381, + 492 + ], + "type": "interline_equation", + "content": "\\boldsymbol {\\rho} [ i ] = \\frac {1}{M} \\sum_ {j = 1} ^ {M} \\left(\\mathbf {v} _ {\\mathrm {r e f}} [ i, j ] \\cdot \\mathbf {v} _ {\\mathrm {t g t}} [ j ]\\right),", + "image_path": "4cd14cfef9f9a857f1658afa482563f9f9aae5ff0fde6e994ca27c3ce2daf2f1.jpg" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 124, + 497, + 490, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 497, + 490, + 520 + ], + "spans": [ + { + "bbox": [ + 124, + 497, + 490, + 520 + ], + "type": "text", + "content": "to measure the fraction of sampled points that are visible in both the " + }, + { + "bbox": [ + 124, + 497, + 490, + 520 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 124, + 497, + 490, + 520 + ], + "type": "text", + "content": "-th reference pose and the target pose." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 121, + 520, + 165, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 520, + 165, + 531 + ], + "spans": [ + { + "bbox": [ + 121, + 520, + 165, + 531 + ], + "type": "text", + "content": "Return " + }, + { + "bbox": [ + 121, + 520, + 165, + 531 + ], + "type": "inline_equation", + "content": "\\rho" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 105, + 530, + 124, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 530, + 124, + 540 + ], + "spans": [ + { + "bbox": [ + 105, + 530, + 124, + 540 + ], + "type": "text", + "content": "end" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 104, + 568, + 504, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 568, + 504, + 591 + ], + "spans": [ + { + "bbox": [ + 104, + 568, + 504, + 591 + ], + "type": "text", + "content": "More Qualitative Results. For additional qualitative examples, we recommend consulting the attached web page, which offers enhanced visualizations." 
+ } + ] + } + ], + "index": 28 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 110, + 95, + 206, + 150 + ], + "blocks": [ + { + "bbox": [ + 110, + 95, + 206, + 150 + ], + "lines": [ + { + "bbox": [ + 110, + 95, + 206, + 150 + ], + "spans": [ + { + "bbox": [ + 110, + 95, + 206, + 150 + ], + "type": "image", + "image_path": "7260ec179d4330f3a596be59f60ebb624909fec6a3bdbace805bf1f660641908.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 209, + 95, + 304, + 150 + ], + "blocks": [ + { + "bbox": [ + 209, + 95, + 304, + 150 + ], + "lines": [ + { + "bbox": [ + 209, + 95, + 304, + 150 + ], + "spans": [ + { + "bbox": [ + 209, + 95, + 304, + 150 + ], + "type": "image", + "image_path": "9e7db2f2124ac20efa2edca5ec9e00b0b6d99dcd36451b763e6533c66aad42f2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 307, + 95, + 403, + 150 + ], + "blocks": [ + { + "bbox": [ + 307, + 95, + 403, + 150 + ], + "lines": [ + { + "bbox": [ + 307, + 95, + 403, + 150 + ], + "spans": [ + { + "bbox": [ + 307, + 95, + 403, + 150 + ], + "type": "image", + "image_path": "9e73712af021410a3cc6091b7f271192d0761ef5e0bbac919792a0ed2bc5e942.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 406, + 95, + 501, + 150 + ], + "blocks": [ + { + "bbox": [ + 406, + 95, + 501, + 150 + ], + "lines": [ + { + "bbox": [ + 406, + 95, + 501, + 150 + ], + "spans": [ + { + "bbox": [ + 406, + 95, + 501, + 150 + ], + "type": "image", + "image_path": "b4112e672e093bd908e0d8b47f478e0720181eb791bfc4170bf71a494e2cad04.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 110, + 152, + 206, + 207 + ], + "blocks": [ + { + "bbox": [ + 110, + 152, + 206, + 207 + ], + "lines": [ + { + "bbox": [ + 110, + 152, + 206, + 207 + ], + "spans": [ + { + "bbox": [ + 110, + 152, + 206, + 207 + ], + "type": "image", + "image_path": "82b66f7e4a39cf80e04885bbb128c8ee9424241e8ae642b1dd992428acd71103.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 209, + 152, + 304, + 207 + ], + "blocks": [ + { + "bbox": [ + 209, + 152, + 304, + 207 + ], + "lines": [ + { + "bbox": [ + 209, + 152, + 304, + 207 + ], + "spans": [ + { + "bbox": [ + 209, + 152, + 304, + 207 + ], + "type": "image", + "image_path": "b2fb6711c620d4ecd7ae763026c5f9b183d24737649a7f6c09253b2753625d9a.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 307, + 152, + 403, + 207 + ], + "blocks": [ + { + "bbox": [ + 307, + 152, + 403, + 207 + ], + "lines": [ + { + "bbox": [ + 307, + 152, + 403, + 207 + ], + "spans": [ + { + "bbox": [ + 307, + 152, + 403, + 207 + ], + "type": "image", + "image_path": "9208587645d921b10c68d516401d5b030a2fcaee02b18f07e376e379a667fee0.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } 
+ ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 406, + 152, + 501, + 207 + ], + "blocks": [ + { + "bbox": [ + 406, + 152, + 501, + 207 + ], + "lines": [ + { + "bbox": [ + 406, + 152, + 501, + 207 + ], + "spans": [ + { + "bbox": [ + 406, + 152, + 501, + 207 + ], + "type": "image", + "image_path": "1554543ce78d3b089e0c1e1756fb0bc850201acde2f34528a5b9ac40bfc5306d.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 110, + 209, + 206, + 263 + ], + "blocks": [ + { + "bbox": [ + 110, + 209, + 206, + 263 + ], + "lines": [ + { + "bbox": [ + 110, + 209, + 206, + 263 + ], + "spans": [ + { + "bbox": [ + 110, + 209, + 206, + 263 + ], + "type": "image", + "image_path": "13ddfe8d2bb188aff88dfc8e860cf494addddb6a22e5b2f076a89b7033c0e483.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 209, + 209, + 304, + 263 + ], + "blocks": [ + { + "bbox": [ + 209, + 209, + 304, + 263 + ], + "lines": [ + { + "bbox": [ + 209, + 209, + 304, + 263 + ], + "spans": [ + { + "bbox": [ + 209, + 209, + 304, + 263 + ], + "type": "image", + "image_path": "e45872d86a46e6fd48f3db8a029253a71ef4ec766b935af977cd99b1b0592ac2.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 307, + 209, + 403, + 263 + ], + "blocks": [ + { + "bbox": [ + 307, + 209, + 403, + 263 + ], + "lines": [ + { + "bbox": [ + 307, + 209, + 403, + 263 + ], + "spans": [ + { + "bbox": [ + 307, + 209, + 403, + 263 + ], + "type": "image", + "image_path": "6570f481c382fe021ed13888148b604c0b603bb4d891f79cec1de0d0a488be05.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 406, + 209, + 501, + 263 + ], + "blocks": [ + { + "bbox": [ + 406, + 209, + 501, + 263 + ], + "lines": [ + { + "bbox": [ + 406, + 209, + 501, + 263 + ], + "spans": [ + { + "bbox": [ + 406, + 209, + 501, + 263 + ], + "type": "image", + "image_path": "c559b904ce54ed8b960ef64d960db2b2626c58caca3d6c512ff9ee87f3d438a7.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 110, + 265, + 206, + 319 + ], + "blocks": [ + { + "bbox": [ + 110, + 265, + 206, + 319 + ], + "lines": [ + { + "bbox": [ + 110, + 265, + 206, + 319 + ], + "spans": [ + { + "bbox": [ + 110, + 265, + 206, + 319 + ], + "type": "image", + "image_path": "e7a0d754531749230bbd4a9a05832bffb34d64393251380876fd18cad297f056.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 209, + 265, + 304, + 319 + ], + "blocks": [ + { + "bbox": [ + 209, + 265, + 304, + 319 + ], + "lines": [ + { + "bbox": [ + 209, + 265, + 304, + 319 + ], + "spans": [ + { + "bbox": [ + 209, + 265, + 304, + 319 + ], + "type": "image", + "image_path": "e21770768b7f5933c8a6579cb05bb882d2db0cd3a528b963e76b28e85fc3f88e.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 307, + 265, + 403, + 319 + ], + "blocks": [ + { + "bbox": [ + 307, + 265, + 403, + 319 + ], + "lines": [ + { + "bbox": [ + 307, + 265, + 403, + 319 + ], + "spans": [ + { + "bbox": [ + 307, + 265, + 403, + 319 + ], + "type": "image", + "image_path": "3dae4109068741e9358f878bbc3dc4031d853cea0cc5c6c8a1494fcebea62548.jpg" + 
} + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 406, + 265, + 501, + 319 + ], + "blocks": [ + { + "bbox": [ + 406, + 265, + 501, + 319 + ], + "lines": [ + { + "bbox": [ + 406, + 265, + 501, + 319 + ], + "spans": [ + { + "bbox": [ + 406, + 265, + 501, + 319 + ], + "type": "image", + "image_path": "56325b6b2dbc279e3cdfcac989057aba84971aad1af70291e761a6af0e60f513.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 110, + 322, + 206, + 376 + ], + "blocks": [ + { + "bbox": [ + 110, + 322, + 206, + 376 + ], + "lines": [ + { + "bbox": [ + 110, + 322, + 206, + 376 + ], + "spans": [ + { + "bbox": [ + 110, + 322, + 206, + 376 + ], + "type": "image", + "image_path": "d885c7d407ce44ff8633d17891065c18fb770ac9a2dbdb8904c89b0abb280874.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 386, + 506, + 409 + ], + "lines": [ + { + "bbox": [ + 105, + 386, + 506, + 409 + ], + "spans": [ + { + "bbox": [ + 105, + 386, + 506, + 409 + ], + "type": "text", + "content": "Figure 12: Training Examples. Our training environments encompass diverse terrains, action spaces, and weather conditions, providing a comprehensive setting for learning." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 209, + 322, + 304, + 376 + ], + "blocks": [ + { + "bbox": [ + 209, + 322, + 304, + 376 + ], + "lines": [ + { + "bbox": [ + 209, + 322, + 304, + 376 + ], + "spans": [ + { + "bbox": [ + 209, + 322, + 304, + 376 + ], + "type": "image", + "image_path": "9759da0bae6bff88da79c18c7517e84bdbc403c95500c5810822ec675e10eb60.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 307, + 322, + 403, + 376 + ], + "blocks": [ + { + "bbox": [ + 307, + 322, + 403, + 376 + ], + "lines": [ + { + "bbox": [ + 307, + 322, + 403, + 376 + ], + "spans": [ + { + "bbox": [ + 307, + 322, + 403, + 376 + ], + "type": "image", + "image_path": "ef99e53069789de1c72b40dec9daf83482c8b1d58b900b04b7b673a7536cdbeb.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 406, + 321, + 501, + 376 + ], + "blocks": [ + { + "bbox": [ + 406, + 321, + 501, + 376 + ], + "lines": [ + { + "bbox": [ + 406, + 321, + 501, + 376 + ], + "spans": [ + { + "bbox": [ + 406, + 321, + 501, + 376 + ], + "type": "image", + "image_path": "b7b3bfbd5ce5428451351fb16ec099c6e94af83843c985d33a1808711993472e.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 168, + 460, + 440, + 666 + ], + "blocks": [ + { + "bbox": [ + 168, + 460, + 440, + 666 + ], + "lines": [ + { + "bbox": [ + 168, + 460, + 440, + 666 + ], + "spans": [ + { + "bbox": [ + 168, + 460, + 440, + 666 + ], + "type": "image", + "image_path": "f69aaeecb41bae0b361ddaf6325948bcdd310d6207adc846da6a8605dba8f003.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 673, + 504, + 696 + ], + "lines": [ + { + "bbox": [ + 104, + 673, + 504, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 673, + 504, + 696 + ], + "type": "text", + "content": "Figure 13: Visualization of Trajectory Examples in the X-Z Space. 
The axis scales represent distances within the Minecraft environment." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 168, + 251, + 434, + 506 + ], + "blocks": [ + { + "bbox": [ + 168, + 251, + 434, + 506 + ], + "lines": [ + { + "bbox": [ + 168, + 251, + 434, + 506 + ], + "spans": [ + { + "bbox": [ + 168, + 251, + 434, + 506 + ], + "type": "image", + "image_path": "57ab4223a27e3897885abbdfe8c890272849a9e1349679df129a8d7cc0014606.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 514, + 504, + 537 + ], + "lines": [ + { + "bbox": [ + 105, + 514, + 504, + 537 + ], + "spans": [ + { + "bbox": [ + 105, + 514, + 504, + 537 + ], + "type": "text", + "content": "Figure 14: Visualization of Relative Pose Distribution for Training in X-Z Space. Red dots indicate positions, while yellow arrows represent directions." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12401/344c7717-406a-4426-bbde-928913ffd40c_content_list.json b/data/2025/2504_12xxx/2504.12401/344c7717-406a-4426-bbde-928913ffd40c_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..6648a05389415a8676bf48b62d129dcd80026bca --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/344c7717-406a-4426-bbde-928913ffd40c_content_list.json @@ -0,0 +1,4558 @@ +[ + { + "type": "text", + "text": "NTIRE 2025 Challenge on Event-Based Image Deblurring: Methods and Results", + "text_level": 1, + "bbox": [ + 91, + 130, + 903, + 152 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Lei Sun*", + "bbox": [ + 143, + 181, + 218, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Boxin Shi*", + "bbox": [ + 127, + 199, + 215, + 214 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Andrea Alfarano*", + "bbox": [ + 264, + 181, + 405, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Radu Timofte*", + "bbox": [ + 256, + 199, + 375, + 214 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Peiqi Duan*", + "bbox": [ + 450, + 181, + 547, + 198 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Danda Pani Paudel*", + "bbox": [ + 416, + 199, + 573, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shaolin Su*", + "bbox": [ + 591, + 181, + 687, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Luc Van Gool*", + "bbox": [ + 614, + 199, + 733, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kaiwei Wang*", + "bbox": [ + 730, + 181, + 848, + 198 + ], + "page_idx": 0 + }, + { + "type": 
"text", + "text": "Qinglin Liu", + "bbox": [ + 774, + 199, + 870, + 215 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Wei Yu", + "bbox": [ + 107, + 215, + 168, + 232 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xiaogian Lv", + "bbox": [ + 205, + 215, + 305, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Lu Yang", + "bbox": [ + 343, + 217, + 413, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shuigen Wang", + "bbox": [ + 449, + 217, + 566, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shengping Zhang", + "bbox": [ + 604, + 217, + 746, + 234 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xiangyang Ji", + "bbox": [ + 781, + 217, + 888, + 233 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Long Bao", + "bbox": [ + 124, + 234, + 204, + 250 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yuqiang", + "bbox": [ + 245, + 234, + 305, + 251 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jinao Song", + "bbox": [ + 395, + 234, + 485, + 251 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ziyi Wang", + "bbox": [ + 522, + 234, + 612, + 251 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shuang Wen", + "bbox": [ + 653, + 234, + 751, + 251 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Heng Sun", + "bbox": [ + 789, + 234, + 872, + 251 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kean Liu", + "bbox": [ + 107, + 252, + 184, + 267 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Mingchen Zhong", + "bbox": [ + 220, + 252, + 357, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Senyan Xu", + "bbox": [ + 395, + 252, + 485, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhijing Sun", + "bbox": [ + 521, + 252, + 617, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jiaying Zhu", + "bbox": [ + 651, + 252, + 751, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Chengjie 6", + "bbox": [ + 787, + 252, + 870, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xingbo Wang", + "bbox": [ + 125, + 270, + 236, + 287 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yidi Liu", + "bbox": [ + 277, + 270, + 344, + 286 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xueyang Fu", + "bbox": [ + 483, + 270, + 581, + 286 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zheng-Jun Zha", + "bbox": [ + 622, + 270, + 743, + 286 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Dawei Fan", + "bbox": [ + 785, + 270, + 870, + 285 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Dafeng Zhang", + "bbox": [ + 109, + 287, + 225, + 303 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yong Yang", + "bbox": [ + 264, + 287, + 352, + 304 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Siru Zhang", + "bbox": [ + 390, + 287, + 478, + 304 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Qinghua Yang", + "bbox": [ + 517, + 287, + 633, + 304 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Hao Kang", + "bbox": [ + 669, + 287, + 754, + 304 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Huiyuan Fu", + "bbox": [ + 790, + 287, + 887, + 304 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Heng Zhang", + "bbox": [ + 156, + 305, + 256, + 321 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Hongyuan", + "bbox": [ + 302, + 305, + 383, + 321 + ], + "page_idx": 0 + }, + { + "type": 
"text", + "text": "Zhijuan Huang", + "bbox": [ + 457, + 305, + 578, + 321 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shuoyan Wei", + "bbox": [ + 624, + 305, + 730, + 321 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Feng Li", + "bbox": [ + 776, + 305, + 841, + 321 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Runmin Cong", + "bbox": [ + 117, + 323, + 232, + 339 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Weiqi Luo", + "bbox": [ + 271, + 323, + 356, + 339 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Mingyun Lin", + "bbox": [ + 398, + 323, + 501, + 338 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Chenxu Jiang", + "bbox": [ + 542, + 323, + 650, + 338 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Hongyi Liu Lei Yu", + "bbox": [ + 689, + 323, + 877, + 338 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Weilun Li", + "bbox": [ + 125, + 340, + 205, + 354 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jiajun Zhai", + "bbox": [ + 245, + 340, + 336, + 356 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ngting Lin", + "bbox": [ + 398, + 340, + 477, + 356 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shuang Ma", + "bbox": [ + 517, + 340, + 609, + 356 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Sai Zhou", + "bbox": [ + 651, + 340, + 723, + 354 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhanwen Liu", + "bbox": [ + 763, + 340, + 870, + 354 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yang Wang", + "bbox": [ + 127, + 358, + 220, + 375 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Eiffel Chong", + "bbox": [ + 261, + 358, + 364, + 375 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Nuwan Bandara", + "bbox": [ + 406, + 358, + 534, + 373 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Thivya Kandappu", + "bbox": [ + 573, + 358, + 717, + 375 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Archan Misra", + "bbox": [ + 759, + 358, + 869, + 373 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yihang Chen", + "bbox": [ + 156, + 375, + 263, + 391 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhan Li", + "bbox": [ + 308, + 375, + 372, + 391 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Weijun Yuan", + "bbox": [ + 419, + 375, + 522, + 391 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Wenzhuo Wang", + "bbox": [ + 568, + 375, + 694, + 392 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Boyang Yao", + "bbox": [ + 740, + 375, + 839, + 391 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhanglu Chen", + "bbox": [ + 119, + 393, + 233, + 409 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yijing Sun", + "bbox": [ + 272, + 393, + 359, + 409 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tianjiao Wan", + "bbox": [ + 400, + 393, + 506, + 409 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zijian Gao", + "bbox": [ + 545, + 393, + 633, + 409 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Qisheng Xu", + "bbox": [ + 673, + 393, + 769, + 409 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kele Xu", + "bbox": [ + 808, + 393, + 875, + 407 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yukun Zhang", + "bbox": [ + 147, + 411, + 254, + 426 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yu He", + "bbox": [ + 297, + 411, + 349, + 426 + ], + "page_idx": 0 + 
}, + { + "type": "text", + "text": "Xiaoyan Xie", + "bbox": [ + 393, + 411, + 495, + 426 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tao Fu", + "bbox": [ + 537, + 411, + 596, + 426 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yashu Gautamkumar Patel", + "bbox": [ + 638, + 411, + 851, + 426 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Vihar Ramesh Jain", + "bbox": [ + 153, + 428, + 303, + 444 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Divesh Basina", + "bbox": [ + 349, + 428, + 464, + 443 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Rishik Ashili", + "bbox": [ + 509, + 428, + 616, + 443 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Manish Kumar Manjhi", + "bbox": [ + 661, + 428, + 841, + 445 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Sourav Kumar", + "bbox": [ + 148, + 446, + 267, + 460 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Prinon Benny", + "bbox": [ + 310, + 446, + 421, + 462 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Himanshu Ghunawat", + "bbox": [ + 465, + 446, + 633, + 460 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "B Sri Sairam Gautam", + "bbox": [ + 676, + 446, + 846, + 460 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Anett Varghese", + "bbox": [ + 308, + 464, + 431, + 479 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abhishek Yadav", + "bbox": [ + 501, + 464, + 633, + 478 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 248, + 515, + 325, + 530 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "This paper presents an overview of NTIRE 2025 the First Challenge on Event-Based Image Deblurring, detailing the proposed methodologies and corresponding results. The primary goal of the challenge is to design an event-based method that achieves high-quality image deblurring, with performance quantitatively assessed using Peak Signal-to-Noise Ratio (PSNR). Notably, there are no restrictions on computational complexity or model size. The task focuses on leveraging both events and images as inputs for single-image deblurring. A total of 199 participants registered, among whom 15 teams successfully submitted valid results, offering valuable insights into the current state of event-based image deblurring. We anticipate that this challenge will drive further advancements in event-based vision research.", + "bbox": [ + 89, + 550, + 483, + 777 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 514, + 515, + 643, + 530 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Traditional camera output frames with relatively long exposure time in a fixed framerate. In contrast, event cameras, a kind of neuromorphic sensor, asynchronously capture pixelwise intensity changes with high temporal resolution [12], and have been applied in various fields such as computational imaging [32, 39-41, 43], human pose estimation [2], depth estimation [30, 34], image segmentation [1, 56], etc.", + "bbox": [ + 511, + 544, + 906, + 651 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In recent years, significant efforts have been dedicated to event-based image restoration. Among various tasks, event-based image deblurring has gained the most attention, as the high temporal resolution of event cameras provides valuable priors for motion deblurring [39-41]. 
Notably, these methods operate under the assumption that input images and events are spatially aligned—a condition that applies to all approaches discussed in this paper.", + "bbox": [ + 511, + 669, + 906, + 790 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In conjunction with the NTIRE 2025 Workshop on New Trends in Image Restoration and Enhancement, the Event-Based Image Deblurring Challenge was organized. The objective is to develop a network architecture or solution that effectively integrates events and images to enhance image deblurring performance. We hope that this challenge will serve as a starting point for promoting event-based image", + "bbox": [ + 511, + 794, + 906, + 901 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.12401v1 [cs.CV] 16 Apr 2025", + "bbox": [ + 22, + 260, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* L. Sun (lei.sun@insait.ai, INSAIT, Sofia University \"St. Kliment Ohridski\"), A. Alfarano, P. Duan, S. Su, K. Wang, B. Shi, R. Timofte, D. P. Paudel, and L. Van Gool were the challenge organizers, while the other authors participated in the challenge.", + "bbox": [ + 89, + 814, + 482, + 863 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Appendix A contains the authors' teams and affiliations.", + "bbox": [ + 91, + 864, + 388, + 875 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "NTIRE 2025 webpage: https://cvlai.net/ntire/2025/.", + "bbox": [ + 91, + 876, + 364, + 887 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Code: https://github.com/AHupuJR/NTIRE2025_EventDeblur_challenge.", + "bbox": [ + 91, + 888, + 477, + 898 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "enhancement on a broader stage and contribute to the thriving development of the event-based vision community.", + "bbox": [ + 89, + 90, + 480, + 119 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This challenge is one of the NTIRE 2025 Workshop associated challenges on: ambient lighting normalization [47], reflection removal in the wild [51], shadow removal [46], event-based image deblurring [42], image denoising [44], XGC quality assessment [27], UGC video enhancement [37], night photography rendering [10], image super-resolution (x4) [4], real-world face restoration [5], efficient super-resolution [36], HR depth estimation [53], efficient burst HDR and restoration [19], cross-domain few-shot object detection [11], short-form UGC video quality assessment and enhancement [22, 23], text to image generation model quality assessment [13], day and night raindrop removal for dual-focused images [21], video quality assessment for video conferencing [16], low light image enhancement [28], light field super-resolution [48], restore any image model (RAIM) in the wild [25], raw restoration and super-resolution [7] and raw reconstruction from RGB on smartphones [8].", + "bbox": [ + 91, + 122, + 482, + 393 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. NTIRE 2025 Event-Based Image Deblurring Challenge", + "text_level": 1, + "bbox": [ + 89, + 407, + 482, + 444 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The goals of this challenge include: (1) promoting research in the area of event-based image deblurring, (2) facilitating comparisons between various methods, and (3) providing a platform for academic and industrial participants to engage, discuss, and potentially establish collaborations. 
This section delves into the specifics of the challenge, including the dataset, challenge phases and evaluation criteria.", + "bbox": [ + 89, + 452, + 482, + 556 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. Dataset", + "text_level": 1, + "bbox": [ + 89, + 569, + 186, + 583 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The HighREV dataset [40] is used for both training and evaluation in this challenge. It consists of 1,771 sets of blurry images, corresponding events, and sharp images for training. Additionally, 421 sets are provided as validation data during the development phase, ensuring a comprehensive benchmark for assessing model performance.", + "bbox": [ + 89, + 590, + 482, + 681 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2. Tracks and Competition", + "text_level": 1, + "bbox": [ + 89, + 693, + 313, + 709 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The aim is to obtain a network design capable of producing high-quality results, with performance measured by PSNR, for event-based image deblurring.", + "bbox": [ + 89, + 715, + 482, + 761 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Challenge phases. Participants were given access to training images from the HighREV dataset. During the validation phase, they could use 421 images from the validation set for model tuning. In the test phase, evaluation was performed on 271 images from the test set. To ensure a fair assessment, the ground-truth images for the test phase remained hidden from participants throughout the challenge.", + "bbox": [ + 89, + 770, + 482, + 876 + ], + "page_idx": 1 + }, + { + "type": "table", + "img_path": "images/7627b94ddaed6363c5a60fa46edfbcbbd4028fb146c98f1913397dd84eabe46c.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td>Team</td><td>Rank</td><td>PSNR (primary)</td><td>SSIM</td></tr>
<tr><td>IVISLAB</td><td>1</td><td>42.79</td><td>0.9196</td></tr>
<tr><td>MiVideoDeblur</td><td>2</td><td>42.70</td><td>0.9281</td></tr>
<tr><td>404NotFound</td><td>3</td><td>42.09</td><td>0.9300</td></tr>
<tr><td>Give_it_a_try</td><td>4</td><td>40.37</td><td>0.9234</td></tr>
<tr><td>BUPTMM</td><td>5</td><td>40.21</td><td>0.9179</td></tr>
<tr><td>WEI</td><td>6</td><td>39.46</td><td>0.9171</td></tr>
<tr><td>DVS-WHU</td><td>7</td><td>39.26</td><td>0.9101</td></tr>
<tr><td>PixelRevive</td><td>8</td><td>39.12</td><td>0.9112</td></tr>
<tr><td>CHD</td><td>9</td><td>38.56</td><td>0.9055</td></tr>
<tr><td>SMU</td><td>10</td><td>38.30</td><td>0.9047</td></tr>
<tr><td>JNU620</td><td>11</td><td>37.63</td><td>0.9019</td></tr>
<tr><td>colab</td><td>12</td><td>36.84</td><td>0.8962</td></tr>
<tr><td>CMSL</td><td>13</td><td>31.81</td><td>0.8900</td></tr>
<tr><td>KUnet</td><td>14</td><td>29.42</td><td>0.8600</td></tr>
<tr><td>Group10</td><td>15</td><td>25.93</td><td>0.8200</td></tr></table>
", + "bbox": [ + 526, + 88, + 895, + 332 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Table 1. Results of NTIRE 2025 Event-Based Image Deblurring Challenge. PSNR and SSIM scores are measured on the 271 test images from HighREV dataset. Team rankings are based primarily on PSNR.", + "bbox": [ + 511, + 344, + 903, + 398 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Evaluation protocol Since the aim of this challenge is to foster the development of accurate event-based image deblurring networks, PSNR and SSIM on the 271 testing images are used as the quantitative evaluation metrics. A code example for calculating these metrics is available at https://github.com/AHupuJR/NTIRE2025_EventDeblurChallenge. The code of the submitted solutions and the pretrained weights are also available in this repository.", + "bbox": [ + 511, + 422, + 903, + 546 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3. Challenge Results", + "text_level": 1, + "bbox": [ + 511, + 556, + 687, + 573 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Table 1 shows the final rankings and test results of the participated teams. The implementation details of each team can be found in Sec.4, while team member information can be found in Appendix A. IVISLAB achieved the first place in terms of PSNR, followed by MiVideoDeblur and 404NotFound as the second and third place, respectively.", + "bbox": [ + 511, + 582, + 903, + 674 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3.1. Participants", + "text_level": 1, + "bbox": [ + 511, + 680, + 643, + 696 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The challenge attracted 199 registered participants, with 15 teams successfully submitting valid results.", + "bbox": [ + 511, + 702, + 903, + 733 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3.2. Main Ideas and Architectures", + "text_level": 1, + "bbox": [ + 511, + 739, + 777, + 755 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Throughout the challenge, participants explored various innovative techniques to improve deblurring performance. Below, we summarize some of the key strategies employed by the top-performing teams.", + "bbox": [ + 511, + 762, + 903, + 823 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "1. Hybrid architectures demonstrated strong performance, with all top-3 teams utilizing a combination of transformers and convolutional networks. This approach leverages global features extracted by transformers alongside local features captured by convolutional", + "bbox": [ + 514, + 825, + 903, + 900 + ], + "page_idx": 1 + }, + { + "type": "footer", + "text": "https://www.cvlai.net/ntire/2025/", + "bbox": [ + 114, + 887, + 292, + 898 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "layers, both of which contribute to effective event-based image deblurring. Besides, both spatial and channel attention mechanisms play a crucial role in enhancing overall performance.", + "bbox": [ + 109, + 90, + 482, + 151 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "2. Pretrained weights matters. The winning team, IVISLAB, leveraged a backbone model initialized with pretrained weights from ImageNet, demonstrating the advantages of transfer learning in event-based image deblurring.", + "3. Cross-modal fusion proves beneficial. Several teams adopted EFNet [39] and REFID [40, 41] as a baseline model to fuse features from the event and image branches.", + "4. Effective training strategies. 
Both the second and third-place teams employed progressive learning techniques during training. Additionally, the winning team utilized a large patch size $(512 \\times 512)$ , which contributed to improved performance.", + "5. Incorporating a novel Mamba-based architecture. Integrating features from both image and event modalities is crucial for enhancing the reconstruction quality of event-based deblurring methods. Team DVS-WHU introduced an innovative Mamba-based architecture to achieve more effective fusion." + ], + "bbox": [ + 89, + 152, + 482, + 452 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.3. Fairness", + "text_level": 1, + "bbox": [ + 89, + 460, + 191, + 476 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To maintain fairness in the event-based image deblurring challenge, specific rules were implemented, primarily regarding the datasets used for training. Participants were permitted to use external datasets for training. However, incorporating the HighREV validation set, whether sharp or blurry images, was strictly prohibited, as this set served to evaluate the overall performance and generalizability of the models. Additionally, the use of HighREV test blurry images for training was not allowed. On the other hand, employing advanced data augmentation techniques during training was considered an acceptable practice.", + "bbox": [ + 88, + 483, + 482, + 650 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4. Challenge Methods and Teams", + "text_level": 1, + "bbox": [ + 89, + 662, + 372, + 679 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4.1. IVISLAB", + "text_level": 1, + "bbox": [ + 89, + 686, + 202, + 700 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To achieve image deblurring, team IVISLAB introduces the Triple Event-stream Image Deblurring Network (TEIDNet). As depicted in Figure 1, TEIDNet converts consecutive events into event voxels at three temporal scales to perceive motion information from blur images and capture fine edges for reconstructing clear images. Furthermore, TEIDNet integrates Shift Window Attention and Channel-Wise Attention blocks to capture local and global contexts, thereby enhancing deblurring accuracy.", + "bbox": [ + 89, + 708, + 482, + 845 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4.1.1. Network Architecture", + "text_level": 1, + "bbox": [ + 89, + 851, + 290, + 864 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "TEIDNet adopts an encoder-decoder architecture to process images and triple-stream event voxels, aiming to estimate", + "bbox": [ + 89, + 869, + 482, + 900 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/c1b557d47e8506e24eb01ecd1ef1e495035f8ebd33e86e59e2ddd403108412fb.jpg", + "image_caption": [ + "Figure 1. The model architecture of TEIDNet, proposed by Team IVISLAB." + ], + "image_footnote": [], + "bbox": [ + 519, + 90, + 901, + 218 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "the deblurred image. Specifically, when deblurring the image at frame $t$ , TEIDNet considers that the long-term event stream surrounding frame $t$ can aid in motion perception. Therefore, it voxelizes the event data from frame $t - T_{l}$ to frame $t + T_{l}$ into a $b$ -bin event voxel $V_{l,t}$ . Simultaneously, since the short-term event stream around frame $t$ can help reconstruct high-frequency textures, TEIDNet voxelizes the event data from frame $t - T_{s}$ to frame $t + T_{s}$ into a $b$ -bin event voxel $V_{s,t}$ . 
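As a concrete aside, the per-window voxelization described here can be sketched in a few lines of PyTorch. This is our own illustrative reconstruction, not IVISLAB's released code; the function name, the (x, y, t, p) event layout, and the hard temporal binning (rather than, say, bilinear temporal interpolation) are all assumptions:

```python
import torch

def events_to_voxel(events: torch.Tensor, b: int, t0: float, t1: float,
                    H: int, W: int) -> torch.Tensor:
    """Accumulate events from the window [t0, t1) into a b-bin voxel grid.

    events: (N, 4) float tensor with columns x, y, timestamp, polarity (+1/-1).
    Returns a (b, H, W) grid whose bins hold signed polarity sums.
    """
    x, y, t, p = events[:, 0].long(), events[:, 1].long(), events[:, 2], events[:, 3]
    keep = (t >= t0) & (t < t1)                      # restrict to the temporal window
    x, y, t, p = x[keep], y[keep], t[keep], p[keep]
    bins = ((t - t0) / (t1 - t0) * b).long().clamp_(0, b - 1)  # equal-width bins
    voxel = torch.zeros(b, H, W)
    voxel.index_put_((bins, y, x), p, accumulate=True)
    return voxel

# TEIDNet-style triple streams would call this once per window length, e.g.
# V_l = events_to_voxel(ev, b, t - T_l, t + T_l, H, W), and likewise for T_m and T_s.
```

The same helper, called with three window lengths, yields the long-, medium-, and short-term voxels that the text concatenates with the blur image.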
Furthermore, to mitigate color artifacts by leveraging higher-resolution motion information near the current frame, TEIDNet voxelizes the event data from frame $t - T_{m}$ to frame $t + T_{m}$ into a $b$-bin event voxel $V_{m,t}$. Subsequently, the event voxels $V_{l,t}, V_{s,t}$, and $V_{m,t}$, along with the blur image $I_{b}$, are concatenated and fed into the network. To effectively fuse the features from the image and event voxels, TEIDNet employs convolutional layers to generate fused feature representations. The network then utilizes a dual-branch encoder. The first, a complex branch, extracts high-level semantic information from the fused features by leveraging shift window attention to capture local context and channel-wise attention blocks to capture global context. The second, a simple branch, utilizes convolutional layers to capture fine-grained details from the fused features. Next, TEIDNet's decoder integrates multiple shift window attention blocks to fuse and upsample the features extracted by the dual-branch encoder. Finally, convolutional layers are employed to predict the deblurred image $I_{t}$.", + "bbox": [ + 511, + 282, + 906, + 691 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4.1.2. Loss Function", + "text_level": 1, + "bbox": [ + 511, + 696, + 658, + 710 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To train TEIDNet, they define a reconstruction loss $\mathcal{L}_r$ for the estimated deblurred image $I_{t}$ as follows:", + "bbox": [ + 511, + 715, + 903, + 747 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{r} = \\lambda_{1} \\mathrm{L}_{1}\\left(I_{t}, I_{t}^{gt}\\right) + \\lambda_{2} \\mathrm{L}_{2}\\left(I_{t}, I_{t}^{gt}\\right) \\tag{1}\n$$\n", + "text_format": "latex", + "bbox": [ + 584, + 756, + 903, + 773 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Here, $\lambda_{1}$ and $\lambda_{2}$ are coefficients that balance the loss terms. The function $\mathrm{L}_1(\cdot ,\cdot)$ represents the mean absolute error, while $\mathrm{L}_2(\cdot ,\cdot)$ denotes the mean squared error. The term $I_t^{gt}$ refers to the ground truth image at frame $t$.", + "bbox": [ + 511, + 784, + 903, + 844 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4.1.3. Implementation Details", + "text_level": 1, + "bbox": [ + 511, + 851, + 723, + 866 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "TEIDNet is implemented using PyTorch on four Nvidia L20 GPUs. During training, a batch size of 16 is utilized, with", + "bbox": [ + 511, + 869, + 906, + 900 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/8611a01662b0ce6655ccee7173684d1e4b03e2302ec0f1864c85d316a26da03d.jpg", + "image_caption": [ + "Figure 2. The framework of DASTF-Net, proposed by Team MiVideoDeblur." + ], + "image_footnote": [], + "bbox": [ + 94, + 88, + 488, + 258 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "input data dimensions of $512 \times 512$ pixels. The network weights are optimized over 1000 epochs using the AdamW optimizer, with an initial learning rate set to $2 \times 10^{-5}$. A cosine annealing scheduler is employed to decay the learning rate progressively. In addition, they take the checkpoint with the best performance and perform a second round of fine-tuning. To mitigate overfitting, data augmentation techniques such as random flipping and rotation are applied. They also initialize the backbone network parameters using weights pretrained on ImageNet. 
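Eq. (1) above maps directly onto stock PyTorch losses. A minimal sketch of the reconstruction loss follows; it is our restatement of the formula, not the team's implementation, with the weights defaulting to the $\lambda_1 = \lambda_2 = 1$ reported next:

```python
import torch
import torch.nn.functional as F

def reconstruction_loss(pred: torch.Tensor, gt: torch.Tensor,
                        lambda1: float = 1.0, lambda2: float = 1.0) -> torch.Tensor:
    """Eq. (1): L_r = lambda1 * L1(I_t, I_t^gt) + lambda2 * L2(I_t, I_t^gt),
    where L1 is the mean absolute error and L2 the mean squared error."""
    return lambda1 * F.l1_loss(pred, gt) + lambda2 * F.mse_loss(pred, gt)
```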
The specific coefficients and parameters are defined as follows: number of bins $b = 7$, long-term temporal window $T_{l} = 5$, medium-term temporal window $T_{m} = 1$, short-term temporal window $T_{s} = 0$, and loss function weights $\lambda_{1} = 1$, $\lambda_{2} = 1$.", + "bbox": [ + 88, + 325, + 482, + 537 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2. MiVideoDeblur", + "text_level": 1, + "bbox": [ + 89, + 546, + 246, + 561 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Introduction. As illustrated in Fig. 2, their team proposed the Dual Attention Spatio-Temporal Fusion Network (DASTF-Net). Motivated by EFNet [39], their model employs a two-stage encoder-decoder architecture. Initially, two encoders separately extract multi-scale features from both the image and event data. Based on the EGACA module [40] and the FAF module [45], they have designed the Temporal Fusion Residual Block (TFRB) and Multi-Scale Cross-Attention Fusion Block (MSCAFB), which perform feature fusion in the temporal and spatial dimensions, respectively. By incorporating a dual-attention mechanism, these modules effectively enhance the model's performance. Following feature fusion, the fused features are fed into a Restormer [55], which further leverages the fused feature information to improve reconstruction quality.", + "bbox": [ + 89, + 568, + 482, + 794 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Training strategy. They employed a four-stage training strategy. In the first stage, the network was trained for 160k iterations using the PSNRLoss function. The AdamW optimizer was used, with an initial learning rate of 2e-4 and a cosine annealing learning rate schedule for updates. Subsequently, in the second stage, data augmentation techniques were introduced, which included adding random Gaussian", + "bbox": [ + 89, + 795, + 482, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "noise and applying random scaling to the input data. Building upon the model from the first stage, the training continued for 80k iterations with an initial learning rate of 1e-4. For the third and fourth stages, the patch size was progressively increased from 256 to 320 and then to 480. The network was trained for 40k iterations in the third stage and 45k iterations in the fourth stage.", + "bbox": [ + 511, + 90, + 903, + 196 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.3. 404NotFound", + "text_level": 1, + "bbox": [ + 511, + 209, + 653, + 224 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Their team proposes EV-Deblurformer [26], a framework consisting of two complementary models designed to fully leverage the temporal dynamics of video sequences and the rich texture details present in single images. The framework includes two distinct components: Video-SFHformer, developed for video-based deblurring, and EFSformer, tailored for single-image deblurring. In Video-SFHformer, they introduce STFBlock to enhance the model's capacity for long-range temporal modeling. In EFSformer, they incorporate STEFusionBlock, which fuses event features from the frequency domain to improve spatial detail restoration. To achieve optimal performance, as shown in Section 4.3.3, a sequence-level ensemble strategy is employed to merge the outputs of both models. A progressive training scheme is also adopted to enhance robustness and effectiveness.", + "bbox": [ + 511, + 232, + 906, + 474 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.3.1. 
Overall Pipeline", + "text_level": 1, + "bbox": [ + 511, + 484, + 671, + 500 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Figure 3 illustrates the overall architecture of their proposed method, EV-Deblurformer. This approach, built upon the two models: Video SFHformer and EFSformer, fully exploits the rich temporal dynamics and sharp edge information provided by event data. For the video deblurring model, they propose the Video-SFHformer based on SFHformer. For the single-image motion deblurring model, they propose the EFSformer built on EFNet[39].", + "bbox": [ + 511, + 505, + 905, + 626 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.3.2. Implementation Details", + "text_level": 1, + "bbox": [ + 511, + 637, + 723, + 652 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "They implement their proposed network via the PyTorch 2.1.2 platform. Adam optimizer with parameters $\\beta_{1} = 0.9$ and $\\beta_{2} = 0.999$ is adopted to optimize their network. Motivated by [55] they introduce the progressive training strategy. The training phase of their network could be divided into two stages:", + "bbox": [ + 511, + 657, + 905, + 747 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "(1) Initial training of EV-Deblurformer. They use a progressive training strategy at first. For the video-based motion deblurring model, they start training with patch size $152 \\times 152$ with batch size of 16 for 250K iterations. The patch size and batch size pairs are updated to $[(192^2, 12), (256^2, 8), (304^2, 8)]$ at iterations [250K, 200K, 150K]. The initial learning rate is $2 \\times 10^{-4}$ and remains unchanged when patch size is 192. Later, the learning rate is set to $1 \\times 10^{-4}$ and $7 \\times 10^{-5}$ for patch and batch size pairs of $(256^2, 8)$ and $(304^2, 8)$ , respectively. They employ a", + "bbox": [ + 511, + 750, + 906, + 901 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/9f72a3b5343b2954180c60b89da07dea92ecc4de71c2198cbf209ef89d3a6ac9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 133, + 85, + 455, + 281 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/d4312036562aa983d2f712981c79dd05fea8c0c2fd66cd31016a474e139d5a33.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 460, + 85, + 861, + 281 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/a3e30ae4e1827997e27cf7621f6fffe30946062565c228e2edb467ce551cb086.jpg", + "image_caption": [ + "Figure 3. The architecture diagram of EV-Deblurformer, proposed by Team 404NotFound, is designed for event-guided motion deblurring." + ], + "image_footnote": [], + "bbox": [ + 133, + 286, + 859, + 479 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "cosine annealing learning rate decay strategy, gradually reducing the learning rate. For the single-image-based motion deblurring model, They begin training with a patch size of $192 \\times 192$ and a batch size of 12 for 250K iterations. During training, patch size and batch size pairs are progressively updated to $(256^{2}, 10)$ , $(288^{2}, 8)$ , and $(320^{2}, 8)$ at 36K, 24K, and 24K iterations, respectively. The initial learning rate is set to $5 \\times 10^{-4}$ , and later adjusted to $1 \\times 10^{-4}$ , $7 \\times 10^{-5}$ , and $5 \\times 10^{-5}$ corresponding to the updated patch and batch size configurations. 
A cosine annealing schedule is employed to gradually decay the learning rate throughout the training process. The first stage is performed on the NVIDIA RTX 4090 GPU. They obtain the best model at this stage as the initialization of the second stage.", + "(2) Fine-tuning EV-Deblurformer. For the video-based motion deblurring model, they start training with a patch size of $320 \times 320$ and a batch size of 4 for 150K iterations. The initial learning rate is set to $1 \times 10^{-5}$ and is adjusted to $1 \times 10^{-7}$ using a cosine annealing schedule, over a total of 150K iterations. They use the entire training data from the challenge without applying any data augmentation techniques. The exponential moving average (EMA) is employed for the dynamic adjustment of the model parameters. For the single-image-based motion deblurring model, they" + ], + "bbox": [ + 88, + 531, + 486, + 902 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "adopt the same training strategy as used in the video-based motion deblurring model. The second training stage is conducted on an NVIDIA RTX 4090 GPU.", + "bbox": [ + 511, + 531, + 906, + 575 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "(3) Evaluation metrics. They utilize two widely adopted reference-based evaluation metrics—Peak Signal-to-Noise Ratio (PSNR) and Structural Similarity Index Measure (SSIM) [49]—to evaluate the effectiveness of their method, following prior works [3, 24, 54, 55]. Higher PSNR and SSIM values generally reflect better performance in image restoration tasks.", + "bbox": [ + 511, + 578, + 908, + 684 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3.3. Ensemble Strategies", + "text_level": 1, + "bbox": [ + 511, + 696, + 702, + 713 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Ensemble learning has been proven to be an effective technique in image restoration. Its most basic application involves integrating the outputs of multiple models and applying a fusion strategy to achieve results with better generalization and greater stability in restoration quality.", + "bbox": [ + 511, + 717, + 906, + 794 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The HighREV-test dataset consists of four sequences. Among them, one is an outdoor scene, which differs markedly from the other three in terms of object diversity, texture richness, and color composition. Based on this observation, they explore a sequence-level ensemble strategy that selectively exchanges outputs between Video-SFHformer and EFSformer.", + "bbox": [ + 511, + 795, + 908, + 900 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/53355ba7ec22077eb962d8f0e9c27950cccf02c59a1dd759a83c4d3ea57b20ea.jpg", + "image_caption": [ + "Figure 4. An overview of the method proposed by Team BUPTMM: They set the weights for the fusion, with $\alpha$ set to 0.6 and $\beta$ to 0.4." + ], + "image_footnote": [], + "bbox": [ + 109, + 88, + 464, + 157 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Specifically, they start with the best-performing Video-SFHformer model and replace the output of the outdoor sequence in the HighREV-test set with the corresponding result generated by EFSformer. The results in Table 1 show that their approach yields the best performance, achieving the highest SSIM score and ranking third overall in the NTIRE Event-Based Image Deblurring Challenge.", + "bbox": [ + 89, + 242, + 483, + 349 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4. 
Give_it_a_try", + "text_level": 1, + "bbox": [ + 89, + 368, + 225, + 383 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4.1. General method", + "text_level": 1, + "bbox": [ + 89, + 393, + 250, + 407 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "This submission is mainly based on the public code of another team. Models used in this submission are EFNet att track fusion and EFNet att track fusion new, which can be found atarchs orarchs/tested. They change the training strategy, finetune the models and combine two best models to push the limits of scoring.", + "bbox": [ + 89, + 416, + 483, + 507 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "- How event modality is utilized in the deblurring process: They used the given SCER format event voxels in training, validating and training. The usage is as same as original EFNet [39] since new networks retain the encoder module of the baseline.", + "bbox": [ + 91, + 516, + 483, + 590 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4.2. Implementation details", + "text_level": 1, + "bbox": [ + 89, + 609, + 297, + 625 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "- Training:", + "text_level": 1, + "bbox": [ + 91, + 628, + 171, + 643 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In the first stage of training, all models are trained for $2 \\times 10^{5}$ iterations with a batch size of 16 by PSNR loss function with AdamW optimizer. In each training batch, each paired images and event voxel are randomly cropped to $256 \\times 256$ and augmented by random flipping and rotation. The learning rate is initialized as $3 \\times 10^{-4}$ , and a cosine annealing scheduler is used to drop the final learning rate as $10^{-7}$ . They finetuned the models obtained from the first stage with a patch size of $512 \\times 512$ . At this stage, all models are trained for another $2 \\times 10^{5}$ iterations with a batch size of 4 and the learning rate drop from $2 \\times 10^{-5}$ to $10^{-6}$ . Models are validated for every $10^{4}$ iterations. Other settings remain unchanged.", + "bbox": [ + 102, + 643, + 482, + 839 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "- Validating and Testing:", + "text_level": 1, + "bbox": [ + 91, + 840, + 261, + 854 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "They chose the highest validated models for each network during the fine-tuning stage and average two models' output as final result to improve robustness.", + "bbox": [ + 102, + 854, + 482, + 900 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.5. BUPTMM", + "text_level": 1, + "bbox": [ + 513, + 90, + 633, + 104 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.5.1. Architecture", + "text_level": 1, + "bbox": [ + 513, + 113, + 647, + 127 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our solution is built on EFNet[39] and STCNet[52]. Inspired by [50], they introduce a detail enhancement module that follows the EFNet prediction stage. The whole pipeline is illustrated in Fig. 4. The detail enhancement module adopts a simple U-Net structure.", + "bbox": [ + 511, + 133, + 905, + 209 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.5.2. Implementation Details", + "text_level": 1, + "bbox": [ + 511, + 220, + 723, + 234 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Both EFNet and STCNet are initialized with pre-trained GoPro checkpoints. 
They fine-tune them separately using the NTIRE official training dataset without additional data, aside from the pre-trained GoPro weights. The patch size is set to $1024 \times 1024$, and they employ the CosineAnnealingLR scheduler to adjust the learning rate.", + "bbox": [ + 511, + 239, + 905, + 330 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The key differences in the training strategies for EFNet and STCNet are as follows:", + "bbox": [ + 511, + 332, + 905, + 359 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For EFNet, they train EFNet for 100k iterations with a batch size of 4 using 4 NVIDIA H800 GPUs. The optimizer is AdamW with an initial learning rate of 2e-4. They generate the event voxel grid following the official script, setting the bin size to 24. Due to differences in the event encoder's channel size, they extended the pre-trained GoPro checkpoint weights from 6 to 24 bins. The loss function consists of the L1 loss, the Charbonnier loss, and the Sobel loss, with respective weights of 1.0, 0.5, and 0.5. Unlike the official EFNet implementation, they do not apply a mask between the two stages.", + "bbox": [ + 511, + 363, + 905, + 527 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For STCNet, they train STCNet for 1000 epochs with a batch size of 8 using 4 NVIDIA H800 GPUs. The optimizer is Adam with an initial learning rate of 2e-4. They use the official event voxel grid with a bin size of 6. The loss function is the Charbonnier loss.", + "bbox": [ + 511, + 530, + 905, + 604 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.6. WEI", + "text_level": 1, + "bbox": [ + 513, + 619, + 586, + 632 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Since REFID [40] is an excellent method of event-based blurry video frame interpolation (VFI), considering the differences in modeling image deblurring and VFI problems, they adapt the REFID structure to fit the image deblurring challenge. As shown in Fig. 5, they develop a Bi-directional Gathered Recurrent Network (BGRN) for event-based image deblurring.", + "bbox": [ + 511, + 641, + 905, + 748 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.6.1. Network Architecture", + "text_level": 1, + "bbox": [ + 511, + 758, + 712, + 772 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Following REFID [40], the events within the exposure time $(t - \Delta t\to t + \Delta t)$ are represented as a voxel grid $V_{t - \Delta t\rightarrow t + \Delta t}\in \mathbb{R}^{(M + 1)\times H\times W}$, where $M$ is set to 9. Furthermore, they divide the voxel $V_{t - \Delta t\rightarrow t + \Delta t}$ into two segments $V_{t - \Delta t\rightarrow t}$ and $V_{t + \Delta t\rightarrow t}$ to perform forward and backward iterations, respectively.", + "bbox": [ + 511, + 777, + 905, + 869 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The BGRN consists of image and event branches. Only a blurry image $B_{t}$ is fed into the image branch, and the", + "bbox": [ + 511, + 869, + 905, + 900 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/9f8be6c650b99795b0cdb6d83e07801f8311e1e0282f1bf2309fd93b92bb1313.jpg", + "image_caption": [ + "Figure 5. The architecture of the Bi-directional Gathered Recurrent Network (BGRN), proposed by Team Wei, is designed for event-based image deblurring and serves as an enhanced reconfiguration network for REFID [40]. 
\"EVR Block\": event recurrent block [40], \"EGACA\": event-guided adaptive channel attention [40], \"SConv\": stripped convolution, \"TConv\": transposed convolution, \"Bi-Fusion\": bidirectional fusion." + ], + "image_footnote": [], + "bbox": [ + 109, + 85, + 890, + 385 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "network output is the corresponding sharp image $\\hat{I}_t$ . Besides, they split the original event branch into a forward recurrent branch and a backward recurrent branch, which respectively and recurrently consumes sub-voxels of forward event voxel $V_{t - \\Delta t\\to t}$ and backward event voxel $V_{t + \\Delta t\\rightarrow t}$ in a gathered way. In each recurrent iteration, the sub-voxel $V_{sub}\\in \\mathbb{R}^{2\\times H\\times W}$ is fed to the event branch, which encodes the event information for the latent frame. To fuse the features obtained from forward and backward recurrent branching, the outputs of both directions are fed into a channel cascade and $1\\times 1$ convolution at each scale (\"Bi-Fusion\" in Fig. 5). Then, they are added element by element with the features of the corresponding scale of the decoder. In addition, to reduce redundancy, they removed the recurrent structure of the decoder section and replaced it with residual blocks. Finally, to make the network learn high-frequency information, the output of the last residual block and the initial features of the blurred image are added element by element, and then the sharp image $\\hat{I}_t$ is obtained through a $3\\times 3$ convolution.", + "bbox": [ + 91, + 473, + 483, + 777 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.6.2. Implementation details", + "text_level": 1, + "bbox": [ + 89, + 789, + 299, + 806 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Training strategy. They train BGRN with the HighREV training dataset specified by the organizer with a batch size of 4 for 200k iterations on an NVIDIA GeForce RTX 3090 GPU. They crop the input images and event voxels to $256 \\times 256$ for training and use horizontal and vertical flips for data enhancement. AdamW [29] with an initial learning", + "bbox": [ + 89, + 809, + 485, + 902 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "rate of $2 \\times 10^{-4}$ and a cosine learning rate annealing strategy with $1 \\times 10^{-7}$ as the minimum learning rate are adopted for optimization. They use a PSNR loss [39] as supervision. Ensemble strategy. During testing, they found that images prefixed with \"zigzag\" showed a large difference in brightness compared to other normal images. To adapt to this sudden change in brightness, they select images with the prefix \"sternwatz_window\" similar to this scene from the training set. Then, they double their brightness to fine-tune the pre-trained BGRN model for 5k iterations with an initial learning rate of $2 \\times 10^{-5}$ . Therefore, the ensemble strategy is applied when testing, i.e., the abnormally bright images (prefixed with \"zigzag\") are processed with the fine-tuned model, and the others are processed with the initial pretrained model.", + "bbox": [ + 511, + 474, + 906, + 700 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.7.DVS-WHU", + "text_level": 1, + "bbox": [ + 511, + 708, + 637, + 723 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.7.1. Network Architecture", + "text_level": 1, + "bbox": [ + 511, + 729, + 712, + 744 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Positioned at Fig. 
6, the proposed Dual Channel Cross-modal Mamba (DCCM) architecture comprises three primary components: two Shallow Feature Extraction (SFE) modules, a series of $N$ dual channel blocks (with $N = 20$ in their experimental configuration), each containing two Residual Dense Blocks (RDB) [57] and two Cross Modal Mamba (CMM) [14] blocks, and a Global Feature Fusion (GFF) module. Initially, both the blur image and the events (represented in 24-bin voxel grids) are processed through the SFE module for preliminary feature extraction. Subsequently,", + "bbox": [ + 511, + 750, + 908, + 902 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/c721fffa4b1c80802496768db4fde67eeb3302413c722054c2d6fddc7e8ce50f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 102, + 89, + 467, + 241 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8c4442be526dca917ebfb53309df0e69db477c4ded87f55e59353dfeadb1e786.jpg", + "image_caption": [ + "Figure 6. Architecture of DCCM, proposed by Team DVS-WHU." + ], + "image_footnote": [], + "bbox": [ + 96, + 243, + 478, + 354 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "the dual channel blocks facilitate in-depth feature extraction and cross-modal interaction. Finally, the GFF module synthesizes the final latent sharp image.", + "bbox": [ + 89, + 406, + 482, + 450 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The core concept of their network is to establish a mutual compensatory relationship between the features derived from event data and those from blurred images through a dual-channel framework. Specifically, while event data are often characterized by significant noise, images typically exhibit lower noise levels. The CMM block is employed to incorporate image features into the event data, thereby mitigating the noise present in the events. Conversely, event data are rich in sharp edge information, and the CMM block also facilitates the integration of event features into blurred images, ultimately contributing to the deblurred result.", + "bbox": [ + 88, + 450, + 482, + 618 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.7.2. Implementation Details", + "text_level": 1, + "bbox": [ + 89, + 625, + 302, + 638 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The network is implemented in PyTorch and trained on two NVIDIA GeForce RTX 3090 GPUs for 150 epochs with a ground-truth-guided L1 norm loss. The training process is composed of two phases. During the first phase, they follow the strategy of Cheng et al. [6] and pretrain their DCCM on a mixed dataset including the synthetic REDS dataset [35] and the semi-synthetic HQF dataset [38] with a learning rate fixed at $1 \times 10^{-4}$ for 50 epochs. In the second phase, the network is fine-tuned on the HighREV dataset [40], where the images are randomly cropped into $256 \times 256$ patches with horizontal flipping for data augmentation and the learning rate linearly decays to $1 \times 10^{-5}$ until the 150th epoch.", + "bbox": [ + 89, + 643, + 482, + 825 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.8. PixelRevive", + "text_level": 1, + "bbox": [ + 89, + 833, + 218, + 848 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "The model they used was the same as EFNet [39]. 
The key to the improved performance of their model lay in the utilization of additional datasets during training and", + "bbox": [ + 89, + 854, + 482, + 902 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "the adoption of larger image sizes in the final fine-tuning phase. They employed a two-stage training strategy. First, they used an event simulator called V2E [15] to generate events from the REDS dataset. To generate the dataset, they set the timestamp resolution to 0.001 and the DVS exposure duration to 0.001. The remaining parameters were configured identically to those specified in the V2E paper. They obtained over 20,000 sets of events, blur images, and sharp images. They trained the model on REDS for 250,000 iterations, with gt_size 256, patch size 8. When training on simulated datasets with the HighREV validation set, they observed a paradoxical divergence: while the training PSNR consistently improved, the validation PSNR exhibited a decline. This counterintuitive phenomenon may stem from distributional discrepancies between synthetic data and HighREV characteristics across multiple feature dimensions.", + "bbox": [ + 511, + 90, + 903, + 330 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Then, they fine-tuned it on the HighREV training dataset for 200,000 iterations, with gt_size 512, patch size 8. The True-CosineAnnealingLR scheduler was employed in both training phases, configured with a period matching the total training iterations and a minimum learning rate value of 1e-7. After experiments, they found that a larger gt_size can improve the PSNR by about 0.5 dB. Experiments showed performance decreases when gt_size exceeds 512 (tested range: 256-608), making 512 the optimal size. All other strategies are the same as in EFNet.", + "bbox": [ + 511, + 332, + 906, + 484 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.9. CHD", + "text_level": 1, + "bbox": [ + 511, + 491, + 591, + 506 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As illustrated in Fig. 7, team CHD develops an efficient Event-Image Deblurformer Network (EIDFNet) based on the Restormer architecture [55]. To address the computational bottleneck encountered when restoring high-resolution blurry images using event data, they incorporate key design elements from EFNet [39].", + "bbox": [ + 511, + 513, + 905, + 604 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.9.1. Network Architecture", + "text_level": 1, + "bbox": [ + 511, + 609, + 714, + 625 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Considering the speed of model training, they still used the official 6-channel voxel grid event representation to achieve a balance between efficiency and precision. They input the blurred image and the event representation with consistent spatial resolution into the network and employ the modified Transformer Block to fuse the cross-modal features. Firstly, they modify the transformer block in Restormer [55] as a fusion module to achieve full interaction between different feature channels by setting the number of input and output dims in the GDFN and adding a $1 \times 1$ convolution in the residual connections. Additionally, they build a mutually enhanced fusion encoder based on the Event-Image Cross-Modal Attention Fusion Module (EICA) proposed in EFNet [39]. The enhanced image features are obtained using K and V derived from event embeddings, while Q is sourced from image embeddings. 
Conversely, the enhanced event features are generated with K and V originating from image embeddings, with Q being drawn from event embeddings.", + "bbox": [ + 511, + 628, + 906, + 901 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/45b31251263c662d830970b24c8d4673eaabe0b4c9988af96695c9a7b73ede74.jpg", + "image_caption": [ + "Figure 7. The framework of Event-Image Deblurformer Network (EIDFNet), proposed by Team CHD." + ], + "image_footnote": [], + "bbox": [ + 91, + 88, + 477, + 426 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In order to achieve comprehensive integration of event and image features, the enhanced image features and enhanced event features are concatenated along the channel dimension. Subsequently, these concatenated features are fused using a Modified Transformer Block. Ultimately, each encoder produces enhanced image features, enhanced event features, and fused features. The enhanced event and image features undergo downsampling before being input into the subsequent encoder. The fused feature is directly linked to the corresponding decoder feature through a skip connection.", + "bbox": [ + 88, + 508, + 482, + 675 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.9.2. Training Strategy", + "text_level": 1, + "bbox": [ + 89, + 707, + 261, + 722 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "They adopt a progressive learning strategy following the settings in Restormer [55] and train the model on an A100 GPU with an L1 loss. The network is trained on smaller image patches in the early epochs and on gradually larger patches in the later training epochs. During the training process, the batch sizes are [4, 3, 2, 2, 1, 1], and the patch sizes are [128, 160, 192, 256, 320, 384], with iteration counts of [92000, 64000, 48000, 36000, 36000, 24000]. They employ the AdamW optimizer with an initial learning rate of 3e-4 that follows a CosineAnnealingRestartCyclicLR decay strategy.", + "bbox": [ + 88, + 734, + 482, + 902 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/b9251cf46272ae3008bd4f011fa63a877d1c023a96ebea5494bbf7f13e3a452f.jpg", + "image_caption": [ + "Figure 8. Overview of the proposed pipeline by Team SMU." + ], + "image_footnote": [], + "bbox": [ + 522, + 94, + 897, + 290 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.10. SMU", + "text_level": 1, + "bbox": [ + 513, + 347, + 599, + 361 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.10.1. 
Motivation", + "text_level": 1, + "bbox": [ + 513, + 369, + 643, + 383 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Inspired by recent successes in cross-knowledge sharing between events and RGB frames [39], hierarchical temporal and frequency modelling [18, 40] and stage-wise fine-fusion [20] for the task of event-based RGB deblurring, they propose to modify the base EFNet model [39] such that the modified model serves as a unified framework which (1) iteratively fine-tunes the coarser deblurred images through two stages of extensive fine-fusion to combat the insufficiencies of the existing decoding techniques while (2) can optionally be made to be specifically aware of propagated frequency information in latent representations to locally and globally filter the blur features in the RGB images through leveraging event features in the frequency domain.", + "bbox": [ + 511, + 388, + 903, + 584 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In addition, to the best knowledge, none of the existing methods for event-based RGB deblurring recognizes the importance of feature tracking in this task which can be beneficial especially in challenging conditions such as high contrast (i.e. very bright or dark surroundings) and fast motion (i.e., large pixel displacements within an accumulated event volume) scenarios [33] towards robust performance. To address this limitation, they explicitly employ a data-driven feature tracking module in the pipeline, an inline feature tracker block, such that event feature tracks corresponding to different points in the reference RGB frame are intuitively incorporated in the learning process specifically in the initial stages of the unified framework.", + "bbox": [ + 511, + 585, + 905, + 781 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.10.2. Network Architecture", + "text_level": 1, + "bbox": [ + 511, + 791, + 720, + 804 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "As depicted in Fig. 8, they propose three main modifications: the inline feature tracker module, bidirectional frame fusion and AdaRevD refinement, to the original EFNet, backed by the motivation as described in section 4.10.1 and validated through the experiments. To this end, they design the inline feature tracker such that the latent RGB and event", + "bbox": [ + 511, + 810, + 905, + 898 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "features are merged and learned through a flow autoencoder block in combination with a Conv-LSTM block to retrieve the temporal alignment of features. Furthermore, it is to be noted that they place the tracker at an initial stage of the pipeline to ensure that the tracker has the access to the high-level features of each modality, rather than the deeper low-level features, since high-level features, which are close to the input data, are more promising to contain information on temporal propagation, which is critical for co-aligned feature tracking.", + "bbox": [ + 89, + 90, + 480, + 241 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Inspired by [20], they design the first stage of refinement using a bidirectional frame fusion block, specifically targeting the spatiotemporal information flow between adjacent coarse frames while in the second stage of refinement, they further refine the output from the first refinement stage with an objective to identify the still remaining degradation patterns in the RGB space and tackle them using an adaptive patch exiting reversible decoder module [31]. 
Optionally, to implement the frequency-based filtering of blur features, they follow the cross-modal frequency (CMF) module proposed by [18], such that latent representations at each level of the first U-Net are passed through CMF modules and concatenated in the decoder levels, in a hierarchical fashion, to enhance the latent feature representations with frequency-aware characteristics.", + "bbox": [ + 89, + 242, + 482, + 468 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.10.3. Implementation Details", + "text_level": 1, + "bbox": [ + 89, + 474, + 308, + 488 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "They train the models using one NVIDIA 3090 GPU machine in two stages: (1) the primary event-RGB fusion pipeline including the proposed frequency-aware module, explicit feature tracking, and the first iteration of refinement based on the bidirectional frame fusion block, and (2) the second iteration of refinement based on the AdaRevD framework [31].", + "bbox": [ + 89, + 492, + 480, + 583 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "By following the baseline implementation [39], they train the models on the HighREV dataset, in both stages, with an initial learning rate of $2 \times 10^{-4}$ for a total of $2 \times 10^{4}$ iterations. The utilized optimizer is AdamW [29] and the learning objective is set to be the PSNR loss [39].", + "bbox": [ + 89, + 584, + 480, + 659 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.11. JNU620", + "text_level": 1, + "bbox": [ + 89, + 666, + 197, + 681 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "As shown in Fig. 9, their framework adopts EFNet [39] as the baseline architecture. To enhance frequency-aware feature processing, a selection frequency block (SF Block) [9] is integrated following each decoder. The architecture introduces two key components: 1) A multi-branch dynamic selection frequency (MDSF) module that adaptively decouples feature mappings into distinct frequency components through dynamic convolution operations; 2) A multi-branch compact selection frequency (MCSF) module specifically designed to expand the receptive field for processing degraded blurry images. For data preparation, they implemented multiple augmentation strategies including horizontal and vertical spa", + "bbox": [ + 89, + 688, + 482, + 900 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "tial shifts. The model was trained for 120,000 iterations on an NVIDIA GeForce RTX 3090 GPU with a batch size of 4. The models were optimized with the Adam method with $\beta_{1} = 0.9$ and $\beta_{2} = 0.99$, and the weight decay was set to $10^{-4}$. The initial learning rate was set to $2 \times 10^{-4}$, gradually decreased following a cosine annealing schedule. In the inference phase, each test image undergoes augmentation through horizontal and vertical flips before being input into the model. The final restored image is generated by averaging all augmented outputs.", + "bbox": [ + 511, + 90, + 903, + 242 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.12. colab", + "text_level": 1, + "bbox": [ + 511, + 253, + 601, + 268 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Our team proposes an improved method based on EFNet, named DEFNet (Dynamic Enhanced Fusion Network). This method incorporates three key enhancements. 
First, we introduce a multi-scale dynamic fusion module, which fuses event and image features at multiple spatial resolutions, significantly improving the restoration of fine details in blurred areas [17]. Second, we enhance the original EICA module by integrating a bidirectional attention mechanism, enabling more effective mutual guidance and interaction between image and event features. Third, for processing event data, we adopt a weighted interpolation strategy [40] that models the dynamic weighting of event sequences more accurately, thereby enriching the temporal details provided to the image restoration process.", + "bbox": [ + 511, + 277, + 903, + 489 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.12.1. Network", + "text_level": 1, + "bbox": [ + 511, + 500, + 627, + 513 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Fig. 10 presents the architecture of DEFNet, which is built upon EFNet and incorporates the newly introduced modules: the multi-scale dynamic fusion module and the enhanced EICA module with a bidirectional attention mechanism. These components work collaboratively to optimize the motion deblurring process by improving feature representation and fusion between the image and event data.", + "bbox": [ + 511, + 520, + 903, + 626 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "During the deblurring process, event streams are used to provide fine-grained temporal variation information that guides the restoration of motion blur in image frames. Specifically, the Symmetric Cumulative Event Representation (SCER) encodes the temporal distribution of events, while the enhanced Event-Image Cross-modal Attention Fusion (EICA) module leverages bidirectional attention to facilitate deeper interaction between modalities. Additionally, the integration of weighted interpolation improves the temporal alignment and accuracy of event feature extraction. Together, these components enable DEFNet to more effectively restore motion-blurred images by enhancing edge sharpness, preserving texture, and capturing motion dynamics with higher fidelity.", + "bbox": [ + 511, + 628, + 905, + 839 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.12.2. Implementation Details", + "text_level": 1, + "bbox": [ + 511, + 849, + 732, + 864 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We use the AdamW optimizer with an initial learning rate of 2e-4, weight decay of 1e-4, and betas set to [0.9, 0.99].", + "bbox": [ + 511, + 869, + 903, + 900 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/631527531248f11e51e7497f1fac20dcc126570e11b65b294387f6222f8be17d.jpg", + "image_caption": [ + "Figure 9. The model framework proposed by Team JNU620." + ], + "image_footnote": [], + "bbox": [ + 178, + 93, + 816, + 305 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/94f49739a4b00f51de806981019b97f06eafd67affb2ff59cff77e96de1c28d9.jpg", + "image_caption": [ + "Figure 10. DEFNet architecture, proposed by Team colab." + ], + "image_footnote": [], + "bbox": [ + 155, + 357, + 415, + 674 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "To dynamically adjust the learning rate, we used the TrueCosineAnnealingLR scheduler with a maximum iteration count of T_max = 200000 and a minimum learning rate of 1e-7. During training, the batch size was set to 4, and 3 worker threads were used per GPU. The total number of training iterations was set to 40000. This method was trained and validated on the HighREV dataset. The model achieved significant improvements on both the training and validation sets, with PSNR and SSIM used as evaluation metrics during training. Validation was performed every 10,000 iterations, and the model was regularly saved. 
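For concreteness, a minimal PyTorch training setup consistent with the hyperparameters above would look as follows; this is our illustrative sketch rather than the team's code, model, criterion and the data tensors are hypothetical placeholders, and torch.optim.lr_scheduler.CosineAnnealingLR plays the role of the TrueCosineAnnealingLR scheduler named above:\nimport torch\n\noptimizer = torch.optim.AdamW(model.parameters(), lr=2e-4, weight_decay=1e-4, betas=(0.9, 0.99))\nscheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200000, eta_min=1e-7)\nfor step in range(40000):  # total training iterations\n    restored = model(blurry, events)  # hypothetical forward pass\n    loss = criterion(restored, sharp)\n    optimizer.zero_grad()\n    loss.backward()\n    optimizer.step()\n    scheduler.step()  # cosine decay toward the 1e-7 floor\nNote that with T_max = 200000 but only 40000 iterations, training stops early on the cosine curve, so the learning rate never reaches the floor. 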
", + "bbox": [ + 89, + 734, + 483, + 900 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "4.13. CMSL", + "text_level": 1, + "bbox": [ + 511, + 359, + 609, + 373 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The Cascade Event Deblurring Model With Event Edge Loss was built on EFNet [39]. A motion edge loss and a cascade framework were introduced to enhance the performance of EFNet.", + "bbox": [ + 511, + 381, + 905, + 441 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "The EFNet backbone was adopted and two improvements were proposed. Firstly, the event data were organized and represented as voxels [39]. Then, the two event voxel frames closest to the center of the exposure time were multiplied to produce a motion edge frame. The motion edge frame contains the edges of the moving objects in the current frame, as shown in Fig. 11; Fig. 12 shows the corresponding edges of the ground-truth (sharp) image. As shown in Fig. 11 and Fig. 12, the motion edge frame contains clear lines that are consistent with the true edges and can serve as guiding information for image deblurring. The edges of the deblurred image output by the model should be similar to the motion edges. Therefore, a motion edge loss was proposed as follows:", + "bbox": [ + 511, + 441, + 906, + 654 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\ell_{\\mathrm{edge}} = \\operatorname{mse}(\\mathrm{edge}(\\widehat{x}) \\cdot m, e)\n$$\n", + "text_format": "latex", + "bbox": [ + 611, + 669, + 807, + 686 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\nm_{i,j} = \\begin{cases} 1 & \\text{if } e_{i,j} > \\tau \\\\ 0 & \\text{otherwise} \\end{cases}\n$$\n", + "text_format": "latex", + "bbox": [ + 596, + 695, + 823, + 710 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "where $\\operatorname{mse}(A, B)$ is the element-wise mean squared error between matrices A and B, $\\widehat{x}$ is the output deblurred image, $e$ is the motion edge frame, $m$ is the motion edge mask, and $\\tau$ is the threshold parameter; an illustrative sketch of this loss is given at the end of this subsection.", + "bbox": [ + 511, + 719, + 905, + 779 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Secondly, a cascade framework was proposed in which two EFNets were connected in cascade to further enhance the image deblurring ability. The first EFNet took the four event voxel frames relatively remote from the center of the exposure time, while the second EFNet took the two event voxel frames relatively close to the center of the exposure time. The two EFNets form a coarse-to-fine paradigm that gradually removes the motion blur.", + "bbox": [ + 511, + 780, + 906, + 901 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/90e21bbf3f89174ac0e0fa957469dd6db0f60d186023d4e39a2ba972e14a96e5.jpg", + "image_caption": [ + "Figure 11. The visualization of the motion edges." + ], + "image_footnote": [], + "bbox": [ + 99, + 93, + 475, + 313 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/0cfd0d4be9e6d4f6281d40f20feff55f33ef42523e6946f552f2758c4155dabf.jpg", + "image_caption": [ + "Figure 12. The edges in the ground-truth frame." + ], + "image_footnote": [], + "bbox": [ + 99, + 366, + 475, + 584 + ], + "page_idx": 11 + }, 
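{ + "type": "text", + "text": "As an illustrative sketch of the motion edge loss above (our reading of the equations rather than the team's code; the Sobel gradient magnitude and a single-channel deblurred image are assumed choices for the edge operator and input):\nimport torch\nimport torch.nn.functional as F\n\ndef motion_edge_loss(x_hat, e, tau):\n    # edge(x_hat): Sobel gradient magnitude of the deblurred image (B, 1, H, W)\n    kx = torch.tensor([[[[-1., 0., 1.], [-2., 0., 2.], [-1., 0., 1.]]]])\n    gx = F.conv2d(x_hat, kx, padding=1)\n    gy = F.conv2d(x_hat, kx.transpose(2, 3), padding=1)\n    edge = torch.sqrt(gx ** 2 + gy ** 2 + 1e-8)\n    m = (e > tau).float()  # binary motion edge mask from the motion edge frame\n    return F.mse_loss(edge * m, e)  # mse(edge(x_hat) * m, e)", + "page_idx": 11 + }, + { + "type": "text", + "text": "4.14. 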
KUnet", + "text_level": 1, + "bbox": [ + 89, + 643, + 187, + 657 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.14.1. Architecture", + "text_level": 1, + "bbox": [ + 89, + 667, + 233, + 681 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Their solution is built upon a custom KUnet backbone tailored for event-based image deblurring. The model employs a dual-encoder strategy that separately processes RGB images and voxelized event data, each through a dedicated encoder branch. At the bottleneck, the features are fused via channel-wise concatenation and passed through a transformer module.", + "bbox": [ + 89, + 686, + 482, + 792 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A key novelty in the design is the use of KANLinear layers within the transformer block. These layers, based on spline-interpolated kernels, improve attention expressiveness without adding significant computational overhead. This fusion architecture leverages the temporal sharpness of events with the spatial-semantic richness of RGB images to produce high-fidelity deblurred outputs.", + "bbox": [ + 89, + 795, + 482, + 902 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/e84d3aa1ada2855602975b3faa64b193f7dfa02538dc62e0653e4b2bc050f2da.jpg", + "image_caption": [ + "Figure 13. Left: Input blurry frame. Right: output of KUnet, with detailed texture." + ], + "image_footnote": [], + "bbox": [ + 532, + 88, + 707, + 191 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/d8e8b79c88d3c5152418bdc02984efc800208515ce23ccfbc32ffe188180b135.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 710, + 89, + 885, + 191 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.14.2. Implementation Details", + "text_level": 1, + "bbox": [ + 511, + 257, + 732, + 272 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "They train the model from scratch on the official NTIRE 2025 HighREV dataset without any external data or pretrained weights. The voxelized events are represented using 6 temporal bins, generating a 6-channel input tensor for the event encoder.", + "bbox": [ + 511, + 276, + 905, + 349 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Training was conducted using 2 NVIDIA A100 GPUs with a batch size of 8 and a patch size of $256 \\times 256$ . They trained the network for 150k iterations using the AdamW optimizer ( $\\beta_{1} = 0.9$ , $\\beta_{2} = 0.99$ , weight decay = 1e-4) and a CosineAnnealingLR scheduler. Data augmentations included random horizontal flips and rotations.", + "bbox": [ + 511, + 352, + 905, + 443 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The loss function includes a PSNR loss weighted at 0.5. Their final checkpoint achieved a peak PSNR of 29.42 on the NTIRE 2025 validation phase.", + "bbox": [ + 511, + 443, + 903, + 488 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Inference was performed using a sliding window approach with a max minibatch size of 8. 
", + "bbox": [ + 511, + 489, + 905, + 565 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Model Complexity:", + "text_level": 1, + "bbox": [ + 532, + 565, + 671, + 579 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "Parameters: 11M", + "FLOPs: Not computed", + "GPU Memory Usage: 16 GB (training)", + "Inference Time: 0.15s/frame" + ], + "bbox": [ + 514, + 580, + 792, + 638 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Code and Resources:", + "text_level": 1, + "bbox": [ + 532, + 641, + 684, + 654 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "- GitHub: https://github.com/Splendor73/NTIRE2025_EventDeblur_challenge_asu", + "- Pretrained: https://www.dropbox.com/scl/fi/19td2xtbzxed2bg8tc9w0/17_KUnet.zip", + "- Results: https://www.dropbox.com/scl/fi/yrky29x2mdwt3k8e40yol/Results.zip" + ], + "bbox": [ + 514, + 656, + 905, + 748 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "4.15. Group10", + "text_level": 1, + "bbox": [ + 511, + 756, + 629, + 773 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The solution is built upon a custom adaptation of the EFNet deblurring framework [39]. The method strategically harnesses both conventional image data and event-based information to mitigate motion blur effectively. Key components of the approach include:", + "bbox": [ + 511, + 779, + 905, + 854 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Dual-Stream Network Architecture: The model consists of parallel convolutional streams. One stream processes the blurry input image, while the other processes event data,", + "bbox": [ + 511, + 854, + 905, + 901 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "which is converted into a voxel grid representation. A cross-modal attention module subsequently fuses the features extracted from both modalities, enhancing the network's ability to recover fine details in dynamic scenes.", + "bbox": [ + 89, + 90, + 480, + 151 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Event Data Representation: The raw event data - comprising spatial coordinates, timestamps, and polarity - is transformed into a voxel grid. This process involves temporal normalization and spatial mapping, enabling the network to capture the dynamic nature of motion events with high precision.", + "bbox": [ + 89, + 152, + 480, + 243 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Training Strategy: Mixed precision training is utilized to maximize GPU efficiency and accelerate convergence. Gradient accumulation is employed to effectively simulate a larger batch size, which is critical for stable training on high-resolution data. The training loss is computed using the Mean Squared Error (MSE) criterion, guiding the network to produce high-quality deblurred images.", + "bbox": [ + 89, + 244, + 480, + 349 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Data Pipeline: Custom PyTorch Dataset classes handle the loading and preprocessing of both image and event data. The pipeline includes resizing, normalization, and careful synchronization between blurry images and their corresponding event data, ensuring data consistency across modalities. 
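As a concrete illustration of the voxel-grid conversion described above, the following is a common formulation we assume here, not necessarily the team's exact code; polarities are taken as +1/-1 and the resolution arguments are placeholders:\nimport torch\n\ndef events_to_voxel(xs, ys, ts, ps, bins=6, h=720, w=1280):\n    # normalize timestamps into [0, bins) and accumulate signed polarity\n    span = max(float(ts.max() - ts.min()), 1e-9)\n    t = (ts - ts.min()) / span * (bins - 1e-6)\n    grid = torch.zeros(bins, h, w)\n    idx = t.long() * h * w + ys.long() * w + xs.long()\n    grid.view(-1).index_add_(0, idx, ps.float())\n    return grid  # (bins, h, w) voxel grid fed to the event stream\nA real pipeline might use bilinear temporal weighting instead of the hard binning above, which is one way to realize the temporal normalization mentioned earlier. 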
", + "bbox": [ + 89, + 349, + 480, + 439 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Performance Evaluation: The evaluation strategy employs widely accepted metrics such as PSNR and SSIM to quantify restoration quality. Test outputs are resized to their original dimensions and saved as lossless PNG images to preserve the fidelity of the results.", + "bbox": [ + 89, + 441, + 480, + 517 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Additional details include:", + "bbox": [ + 109, + 518, + 287, + 532 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Parameter Count: The EnhancedEFNet model consists of convolutional layers, CrossModalAttention blocks, and skip connections, leading to a parameter count in the range of millions.", + "bbox": [ + 89, + 534, + 482, + 593 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "CrossModalAttention layers: These layers introduce additional tensor operations and memory usage. No external pre-trained models were directly used in training; the architecture was trained from scratch on the provided dataset.", + "bbox": [ + 89, + 595, + 480, + 656 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "GPU Memory Usage: Memory usage is mainly influenced by the batch size (default 4 per GPU) and the voxel grid representation, which uses 6 event bins and therefore increases the input size.", + "bbox": [ + 89, + 657, + 480, + 702 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "CrossModalAttention: Inspired by self-attention mechanisms in Transformer models. Hybrid Loss Function: Combines MSE and L1 loss for better generalization. CosineAnnealingLR Scheduler: Used to dynamically adjust learning rates during training.", + "bbox": [ + 89, + 703, + 480, + 779 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Use of Additional Training Data: The training was restricted to the HighREV dataset provided by NTIRE; no additional synthetic or external event-based datasets were incorporated. Potential Future Enhancements: Using real-world event datasets (e.g., DSEC, MVSEC) could improve generalization, and fine-tuning with pre-trained image restoration models (like DeblurGAN) could be explored.", + "bbox": [ + 89, + 780, + 480, + 900 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Quantitative and Qualitative Improvements: Quantitatively, the method achieved a PSNR of 25.93, improved compared to baseline event fusion models, and an SSIM of 0.82, indicating better perceptual quality in the restored images. Qualitatively, the attention-based fusion of events and images leads to sharper edges and better contrast in reconstructed images, and the method works well in low-light or high-motion-blur scenarios.", + "bbox": [ + 511, + 90, + 903, + 256 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Comparison with Baseline Models: Standard CNN-based deblurring struggles with fine-grained event details, but EnhancedEFNet effectively fuses event features to improve deblurring accuracy, and CrossModalAttention aids in the spatial alignment of events and images, reducing artifacts. Failure Cases & Future Improvements: Highly blurred images with saturated event data can still cause artifacts. More robust fusion mechanisms (e.g., transformer-based approaches) could further enhance performance. 
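To make the fusion step concrete, a minimal cross-modal attention block in the spirit of the CrossModalAttention layers described above might look as follows; this is our sketch with illustrative sizes, not the team's implementation, and full spatial attention is assumed, which is only practical on modest patch sizes:\nimport torch\nimport torch.nn as nn\n\nclass CrossModalAttention(nn.Module):\n    # Queries come from image features, keys/values from event features.\n    def __init__(self, c):\n        super().__init__()\n        self.q = nn.Conv2d(c, c, 1)\n        self.k = nn.Conv2d(c, c, 1)\n        self.v = nn.Conv2d(c, c, 1)\n    def forward(self, f_img, f_evt):\n        b, c, h, w = f_img.shape\n        q = self.q(f_img).flatten(2).transpose(1, 2)  # b x hw x c\n        k = self.k(f_evt).flatten(2)  # b x c x hw\n        v = self.v(f_evt).flatten(2).transpose(1, 2)  # b x hw x c\n        attn = torch.softmax(q @ k / c ** 0.5, dim=-1)  # b x hw x hw\n        out = (attn @ v).transpose(1, 2).reshape(b, c, h, w)\n        return f_img + out  # event-guided residual correction 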
", + "bbox": [ + 511, + 257, + 903, + 393 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 513, + 407, + 671, + 422 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "This work was partially supported by the Humboldt Foundation and the Ministry of Education and Science of Bulgaria (support for INSAIT, part of the Bulgarian National Roadmap for Research Infrastructure). Shaolin Su was supported by the HORIZON MSCA Postdoctoral Fellowships funded by the European Union (project number 101152858). We thank the NTIRE 2025 sponsors: ByteDance, Meituan, Kuaishou, and University of Würzburg (Computer Vision Lab).", + "bbox": [ + 511, + 431, + 903, + 568 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A. Teams and affiliations", + "text_level": 1, + "bbox": [ + 513, + 582, + 725, + 598 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "NTIRE 2025 team", + "text_level": 1, + "bbox": [ + 513, + 606, + 658, + 619 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Title: NTIRE 2025 Event-Based Image Deblurring Challenge", + "bbox": [ + 511, + 628, + 903, + 657 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 514, + 660, + 584, + 672 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Lei Sun$^{1}$ (leo.sun@zju.edu.cn),", + "bbox": [ + 514, + 674, + 725, + 688 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Andrea Alfarano$^{1}$ (andrea.alfarano@insait.ai),", + "bbox": [ + 514, + 689, + 820, + 703 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Peiqi Duan$^{2}$ (duanqi0001@pku.edu.cn),", + "bbox": [ + 514, + 704, + 779, + 718 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Shaolin Su$^{3}$ (shaolin@cvc.uab.cat),", + "bbox": [ + 514, + 719, + 750, + 734 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Kaiwei Wang$^{4}$ (wangkaiwei@zju.edu.cn),", + "bbox": [ + 514, + 734, + 792, + 750 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Boxin Shi$^{2}$ (shiboxin@pku.edu.cn),", + "bbox": [ + 514, + 750, + 751, + 763 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Radu Timofte$^{5}$ (radu.timofte@uni-wuerzburg.de),", + "bbox": [ + 514, + 765, + 839, + 779 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Danda Pani Paudel$^{1}$ (danda.paudel@insait.ai),", + "bbox": [ + 514, + 780, + 820, + 794 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Luc Van Gool$^{1}$ (vangool@vision.ee.ethz.ch)", + "bbox": [ + 514, + 795, + 810, + 809 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Affiliations:", + "text_level": 1, + "bbox": [ + 513, + 825, + 596, + 838 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "1 INSAIT, Sofia University \"St. 
Kliment Ohridski\", Bulgaria", + "2 Peking University, China", + "3 Computer Vision Center, Spain" + ], + "bbox": [ + 514, + 840, + 903, + 900 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "4 Zhejiang University, China", + "5 University of Würzburg, Germany" + ], + "bbox": [ + 89, + 90, + 330, + 121 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "IVISLAB", + "text_level": 1, + "bbox": [ + 91, + 145, + 169, + 159 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Title: Triple Event-stream Image Deblurring Network", + "bbox": [ + 89, + 167, + 452, + 183 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Members:", + "text_level": 1, + "bbox": [ + 91, + 184, + 161, + 196 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Qinglin Liu$^{1}$ (qlliu@hit.edu.cn), Wei Yu$^{2}$, Xiaoqian Lv$^{1}$, Lu Yang$^{3}$, Shuigen Wang$^{3}$, Shengping Zhang$^{1}$, Xiangyang Ji$^{2}$", + "bbox": [ + 89, + 196, + 480, + 242 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Affiliations:", + "text_level": 1, + "bbox": [ + 91, + 244, + 174, + 258 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1 Harbin Institute of Technology, Weihai", + "2 Tsinghua University", + "3 Raytron Technology Co., Ltd." + ], + "bbox": [ + 91, + 258, + 359, + 304 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "MiVideoDeblur", + "text_level": 1, + "bbox": [ + 91, + 329, + 215, + 343 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Title: Event-Based Image Deblurring from Team MiVideoDeblur", + "bbox": [ + 89, + 351, + 480, + 380 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Members:", + "text_level": 1, + "bbox": [ + 91, + 382, + 163, + 395 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Long Bao$^{1}$ (baolong@xiaomi.com), Yuqiang Yang$^{1}$, Jinao Song$^{1}$, Ziyi Wang$^{1}$, Shuang Wen$^{1}$, Heng Sun$^{1}$", + "bbox": [ + 89, + 396, + 480, + 426 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Affiliations:", + "text_level": 1, + "bbox": [ + 91, + 428, + 174, + 441 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "$^{1}$ Xiaomi Inc., China", + "bbox": [ + 93, + 441, + 230, + 455 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "404NotFound", + "text_level": 1, + "bbox": [ + 91, + 481, + 199, + 496 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Title: Event-Conditioned Dual-Modal Fusion for Motion Deblurring", + "bbox": [ + 89, + 503, + 480, + 534 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Members:", + "text_level": 1, + "bbox": [ + 91, + 536, + 161, + 547 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Kean Liu$^{1}$ (rickyliu@mail.ustc.edu.cn), Mingchen Zhong$^{1}$, Senyan Xu$^{1}$, Zhijing Sun$^{1}$, Jiaying Zhu$^{1}$, Chengjie Ge$^{1}$, Xingbo Wang$^{1}$, Yidi Liu$^{1}$, Xin Lu$^{1}$, Xueyang Fu$^{1}$, Zheng-Jun Zha$^{1}$", + "bbox": [ + 89, + 549, + 480, + 608 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Affiliations:", + "text_level": 1, + "bbox": [ + 91, + 609, + 174, + 625 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "1 University of Science and Technology of China", + "bbox": [ + 93, + 625, + 416, + 640 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Give_it_a_try", + "text_level": 1, + "bbox": [ + 91, + 665, + 192, + 681 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Title: Event-Based Image Deblurring from Team Give_it_a_try", + "bbox": [ + 89, + 686, + 480, + 717 + ], + "page_idx": 13 + 
}, + { + "type": "text", + "text": "Members:", + "text_level": 1, + "bbox": [ + 91, + 718, + 161, + 731 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Dawei Fan$^{1}$ (dawei.fan@partner.samsung.com), Dafeng Zhang$^{1}$, Yong Yang$^{1}$", + "bbox": [ + 89, + 732, + 480, + 762 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Affiliations:", + "text_level": 1, + "bbox": [ + 91, + 763, + 174, + 777 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "$^{1}$ Samsung Research China-Beijing (SRC-B)", + "bbox": [ + 93, + 777, + 392, + 792 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "BUPTMM", + "text_level": 1, + "bbox": [ + 91, + 816, + 176, + 830 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Title: Weighted Fusion for Event-based Image Deblurring\nMembers:", + "bbox": [ + 89, + 839, + 478, + 868 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Siru Zhang$^{1}$ (zhangsr@bupt.edu.cn), Qinghua Yang$^{1}$, Hao Kang$^{1}$, Huiyuan Fu$^{1}$, Heng Zhang$^{2}$, Hongyuan Yu$^{2}$,", + "bbox": [ + 89, + 869, + 480, + 901 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Zhijuan Huang", + "bbox": [ + 511, + 90, + 622, + 106 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Affiliations:", + "text_level": 1, + "bbox": [ + 513, + 107, + 596, + 119 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1 Beijing University of Posts and Telecommunications, Beijing, China.", + "$^{2}$ Xiaomi Inc., China." + ], + "bbox": [ + 513, + 121, + 903, + 165 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "WEI", + "text_level": 1, + "bbox": [ + 514, + 194, + 555, + 208 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Title: Bi-directional Gathered Recurrent Network for Event-based Image Deblurring", + "bbox": [ + 511, + 217, + 903, + 247 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Members:", + "text_level": 1, + "bbox": [ + 514, + 250, + 584, + 261 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Shuoyan Wei$^{1}$ (shuoyan.wei@bjtu.edu.cn),", + "bbox": [ + 513, + 262, + 800, + 277 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Feng Li$^{2}$, Runmin Cong$^{3}$", + "bbox": [ + 514, + 277, + 683, + 292 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Affiliations:", + "text_level": 1, + "bbox": [ + 514, + 294, + 596, + 306 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$^{1}$ Institute of Information Science, Beijing Jiaotong University", + "$^{2}$ School of Computer Science and Engineering, Hefei University of Technology", + "$^{3}$ School of Control Science and Engineering, Shandong University" + ], + "bbox": [ + 514, + 308, + 903, + 398 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "DVS-WHU", + "text_level": 1, + "bbox": [ + 514, + 426, + 604, + 441 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Title: Dual Channel Cross-modal Mamba for Event-based Motion Deblurring", + "bbox": [ + 511, + 450, + 903, + 479 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Members:", + "text_level": 1, + "bbox": [ + 514, + 481, + 584, + 493 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Weiqi Luo$^{1}$ (wikyluo@whu.edu.cn),", + "bbox": [ + 514, + 496, + 754, + 510 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Mingyun Lin$^{1}$, Chenxu Jiang$^{1}$, Hongyi Liu$^{1}$, Lei Yu$^{2}$ \nAffiliations:", "bbox": [ + 514, + 510, + 903, + 
540 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$^{1}$ School of Electronic Information, Wuhan University", + "$^{2}$ School of Artificial Intelligence, Wuhan University" + ], + "bbox": [ + 514, + 541, + 870, + 571 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "PixelRevive", + "text_level": 1, + "bbox": [ + 514, + 599, + 607, + 613 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Title: Event-Based Image Deblurring from Team PixelRevive", + "bbox": [ + 511, + 622, + 903, + 651 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Members:", + "text_level": 1, + "bbox": [ + 514, + 652, + 584, + 665 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Weilun Li$^{1}$ (xyj961011@163.com), Jiajun Zhai$^{1}$, Tingting Lin$^{1}$", + "bbox": [ + 513, + 667, + 903, + 696 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Affiliations:", + "text_level": 1, + "bbox": [ + 514, + 698, + 596, + 712 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "1 College of Optical Science and Engineering, Zhejiang University", + "bbox": [ + 514, + 713, + 903, + 743 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "CHD", + "text_level": 1, + "bbox": [ + 514, + 771, + 558, + 785 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Title: Event-Image Deblurformer Network", + "bbox": [ + 513, + 794, + 802, + 809 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Members:", + "text_level": 1, + "bbox": [ + 514, + 811, + 584, + 821 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Shuang Ma$^{1}$ (3125508679@qq.com), Sai Zhou$^{2}$, Zhanwen Liu$^{3}$, Yang Wang$^{4}$", + "bbox": [ + 513, + 824, + 903, + 854 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Affiliations:", + "text_level": 1, + "bbox": [ + 514, + 856, + 596, + 869 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "1 Chang'an University, Xi'an, China", + "bbox": [ + 514, + 869, + 756, + 885 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "SMU", + "text_level": 1, + "bbox": [ + 91, + 90, + 135, + 104 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Title: Explicit Feature Tracking and Iterative Refinement for Enhancing Event-based Image Deblurring", + "bbox": [ + 89, + 114, + 483, + 145 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 91, + 146, + 163, + 157 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Eiffel Chong$^{1}$, Nuwan Bandara$^{1}$, Thivya Kandappu$^{1}$ (thivyak@smu.edu.sg), Archan Misra$^{1}$", + "bbox": [ + 91, + 159, + 480, + 190 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 191, + 174, + 204 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "$^{1}$ Singapore Management University", + "bbox": [ + 93, + 205, + 334, + 220 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "JNU620", + "text_level": 1, + "bbox": [ + 91, + 252, + 158, + 268 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Title: Event-Based Image Deblurring from Team JNU620", + "bbox": [ + 89, + 276, + 478, + 292 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 91, + 292, + 163, + 306 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Yihang Chen$^{1}$ (Ehang@stu.jnu.edu.cn),", + "bbox": [ + 91, + 306, + 354, + 321 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Zhan Li$^{1}$, Weijun Yuan$^{1}$, Wenzhuo Wang$^{1}$, Boyang Yao$^{1}$, Zhanglu Chen
$^{1}$", + "bbox": [ + 91, + 321, + 480, + 352 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 353, + 174, + 367 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "$^{1}$ Department of Computer Science, Jinan University, Guangzhou, China", + "bbox": [ + 91, + 367, + 480, + 398 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "colab", + "text_level": 1, + "bbox": [ + 91, + 430, + 137, + 444 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Title: Dynamic Enhanced Fusion Network for Event-based Image Deblurring", + "bbox": [ + 89, + 454, + 482, + 484 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 91, + 486, + 163, + 498 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Yijing Sun $^{1}$ (syj3508852939@163.com), Tianjiao Wan $^{1}$ , Zijian Gao $^{1}$ , Qisheng Xu $^{1}$ , Kele Xu $^{1}$", + "bbox": [ + 91, + 500, + 480, + 530 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 531, + 174, + 544 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "$^{1}$ National University of Defense Technology", + "bbox": [ + 91, + 545, + 390, + 560 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "CMSL", + "text_level": 1, + "bbox": [ + 91, + 592, + 147, + 607 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Title: Cascade Event Deblurring Model With Event Edge Loss", + "bbox": [ + 89, + 616, + 482, + 645 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 91, + 648, + 163, + 660 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Yukun Zhang $^{1}$ (zhangyukun@cmhi.chinamobile.com), Yu He $^{1}$ , Xiaoyan Xie $^{1}$ , Tao Fu $^{1}$", + "bbox": [ + 91, + 662, + 480, + 691 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 693, + 174, + 705 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "1 China Mobile (Hangzhou) Information Technology Co., Ltd, Hangzhou, China", + "bbox": [ + 91, + 707, + 480, + 738 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "KUnet", + "text_level": 1, + "bbox": [ + 91, + 768, + 147, + 784 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Title KUnet", + "bbox": [ + 91, + 794, + 174, + 808 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Members:", + "bbox": [ + 91, + 810, + 163, + 823 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Yashu Gautamkumar Patel1 (ypatel37@asu.edu), Vihar Ramesh Jain1, Divesh Basina1", + "bbox": [ + 91, + 824, + 482, + 854 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Affiliations:", + "bbox": [ + 91, + 856, + 174, + 869 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "1 Arizona State University", + "bbox": [ + 91, + 869, + 267, + 886 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Group10", + "text_level": 1, + "bbox": [ + 513, + 90, + 588, + 106 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Title: Event-Based Image Deblurring from Team Group10 Members:", + "bbox": [ + 511, + 112, + 906, + 141 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Rishik Ashili $^{1}$ (rishik67_soe@jnu.ac.in), Manish Kumar Manjhi $^{1}$ , Sourav Kumar $^{1}$ , Prinon Benny $^{1}$ , Himanshu Ghunawat $^{1}$ , B Sri Sairam Gautam $^{1}$ , Anett Varghese $^{1}$ , Abhishek Yadav $^{1}$", + "bbox": [ + 511, + 142, + 905, + 202 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": 
"Affiliations:", + "bbox": [ + 513, + 204, + 598, + 218 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "1 Jawaharlal Nehru University, New Delhi, India", + "bbox": [ + 514, + 218, + 834, + 233 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 514, + 263, + 609, + 279 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Inigo Alonso and Ana C Murillo. Ev-segnet: Semantic segmentation for event-based cameras. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 0-0, 2019. 1", + "[2] Jiaan Chen, Hao Shi, Yaozu Ye, Kailun Yang, Lei Sun, and Kaiwei Wang. Efficient human pose estimation via 3d event point cloud. In 2022 International Conference on 3D Vision (3DV), pages 1-10. IEEE, 2022. 1", + "[3] Liangyu Chen, Xiaojie Chu, Xiangyu Zhang, and Jian Sun. Simple baselines for image restoration. In European Conference on Computer Vision, pages 17-33. Springer, 2022. 5", + "[4] Zheng Chen, Kai Liu, Jue Gong, Jingkai Wang, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. NTIRE 2025 challenge on image super-resolution $(\\times 4)$ : Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[5] Zheng Chen, Jingkai Wang, Kai Liu, Jue Gong, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. NTIRE 2025 challenge on real-world face restoration: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[6] Zhangyi Cheng, Xiang Zhang, Lei Yu, Jianzhuang Liu, Wen Yang, and Gui-Song Xia. Recovering continuous scene dynamics from a single blurry image with events. arXiv preprint arXiv:2304.02695, 2023. 8", + "[7] Marcos Conde, Radu Timofte, et al. NTIRE 2025 challenge on raw image restoration and super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[8] Marcos Conde, Radu Timofte, et al. Raw image reconstruction from RGB on smartphones. NTIRE 2025 challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[9] Yuning Cui, Yi Tao, Zhenshan Bing, Wenqi Ren, Xinwei Gao, Xiaochun Cao, Kai Huang, and Alois Knoll. Selective frequency network for image restoration. In International Conference on Learning Representations, 2023. 10", + "[10] Egor Ershov, Sergey Korchagin, Alexei Khalin, Artyom Panshin, Arseniy Terekhin, Ekaterina Zaychenkova, Georgiy" + ], + "bbox": [ + 514, + 287, + 906, + 901 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Lobarev, Vsevolod Plokhotnyuk, Denis Abramov, Elisey Zhdanov, Sofia Dorogova, Yasin Mamedov, Nikola Banic, Georgii Perevozchikov, Radu Timofte, et al. NTIRE 2025 challenge on night photography rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[11] Yuqian Fu, Xingyu Qiu, Bin Ren Yanwei Fu, Radu Timofte, Nicu Sebe, Ming-Hsuan Yang, Luc Van Gool, et al. NTIRE 2025 challenge on cross-domain few-shot object detection: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2", + "[12] Guillermo Gallego, Tobi Delbruck, Garrick Orchard, Chiara Bartolozzi, Brian Taba, Andrea Censi, Stefan Leutenegger, Andrew Davison, Jörg Conradt, Kostas Daniilidis, and Davide Scaramuzza. Event-based vision: A survey. IEEE Trans. Pattern Anal. Mach. Intell., 44(1):154-180, 2022. 1", + "[13] Shuhao Han, Haotian Fan, Fangyuan Kong, Wenjie Liao, Chunle Guo, Chongyi Li, Radu Timofte, et al. NTIRE 2025 challenge on text to image generation model quality assessment. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[14] Xuanhua He, Ke Cao, Jie Zhang, Keyu Yan, Yingying Wang, Rui Li, Chengjun Xie, Danfeng Hong, and Man Zhou. Panmamba: Effective pan-sharpening with state space model. Information Fusion, 115:102779, 2025. 7", + "[15] Yuhuang Hu, Shih-Chii Liu, and Tobi Delbruck. v2e: From video frames to realistic dvs events. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1312-1321, 2021. 8", + "[16] Varun Jain, Zongwei Wu, Quan Zou, Louis Florentin, Henrik Turbell, Sandeep Siddhartha, Radu Timofte, et al. NTIRE 2025 challenge on video quality enhancement for video conferencing: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[17] J Kim, D K Ghosh, and Y J Jung. Event-based video deblurring based on image and event feature fusion. Expert Systems with Applications, 223:119917, 2023. 10", + "[18] Taewoo Kim, Hoonhee Cho, and Kuk-Jin Yoon. Frequency-aware event-based video deblurring for real-world motion blur. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24966-24976, 2024. 9, 10", + "[19] Sangmin Lee, Eunpil Park, Angel Canelo, Hyunhee Park, Youngjo Kim, Hyungju Chun, Xin Jin, Chongyi Li, Chun-Le Guo, Radu Timofte, et al. NTIRE 2025 challenge on efficient burst hdr and restoration: Datasets, methods, and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[20] Huan Li, Hailong Shi, and Xingyu Gao. A coarse-to-fine fusion network for event-based image deblurring. In Proceedings of the International Joint Conference on Artificial Intelligence, pages 974-982, 2024. 9, 10", + "[21] Xin Li, Yeying Jin, Xin Jin, Zongwei Wu, Bingchen Li, Yufei Wang, Wenhan Yang, Yu Li, Zhibo Chen, Bihan Wen, Robby" + ], + "bbox": [ + 91, + 90, + 482, + 901 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Tan, Radu Timofte, et al. NTIRE 2025 challenge on day and night raindrop removal for dual-focused images: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[22] Xin Li, Xijun Wang, Bingchen Li, Kun Yuan, Yizhen Shao, Suhang Yao, Ming Sun, Chao Zhou, Radu Timofte, and Zhibo Chen. NTIRE 2025 challenge on short-formUGC video quality assessment and enhancement: Kwaisr dataset and study. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[23] Xin Li, Kun Yuan, Bingchen Li, Fengbin Guan, Yizhen Shao, Zihao Yu, Xijun Wang, Yiting Lu, Wei Luo, Suhang Yao, Ming Sun, Chao Zhou, Zhibo Chen, Radu Timofte, et al. NTIRE 2025 challenge on short-formUGC video quality assessment and enhancement: Methods and results. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[24] Jingyun Liang, Jiezhang Cao, Guolei Sun, Kai Zhang, Luc Van Gool, and Radu Timofte. Swinir: Image restoration using swin transformer. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1833-1844, 2021. 5", + "[25] Jie Liang, Radu Timofte, Qiaosi Yi, Zhengqiang Zhang, Shuaizheng Liu, Lingchen Sun, Rongyuan Wu, Xindong Zhang, Hui Zeng, Lei Zhang, et al. NTIRE 2025 the 2nd restore any image model (RAIM) in the wild challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[26] Kean Liu, Mingchen Zhong, Senyan Xu, Zhijing Sun, Jiaying Zhu, Chengjie Ge, Xin Lu, Xingbo Wang, Xueyang Fu, and Zheng-Jun Zha. Event-conditioned dual-modal fusion for motion deblurring. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 4", + "[27] Xiaohong Liu, Xiongkuo Min, Qiang Hu, Xiaoyun Zhang, Jie Guo, et al. NTIRE 2025 XGC quality assessment challenge: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[28] Xiaoning Liu, Zongwei Wu, Florin-Alexandru Vasluianu, Hailong Yan, Bin Ren, Yulun Zhang, Shuhang Gu, Le Zhang, Ce Zhu, Radu Timofte, et al. NTIRE 2025 challenge on low light image enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[29] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2019. 7, 10", + "[30] Xingyu Lu, Lei Sun, Diyang Gu, and Kaiwei Wang. Sge: structured light system based on gray code with an event camera. Optics Express, 32(26):46044-46061, 2024. 1", + "[31] Xintian Mao, Qingli Li, and Yan Wang. Adarevd: Adaptive patch exiting reversible decoder pushes the limit of image deblurring. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 25681-25690, 2024. 10" + ], + "bbox": [ + 516, + 90, + 903, + 898 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[32] Nico Messikommer, Stamatos Georgoulis, Daniel Gehrig, Stepan Tulyakov, Julius Erbach, Alfredo Bochicchio, Yuanyou Li, and Davide Scaramuzza. Multi-bracket high dynamic range imaging with event cameras. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 547-557, 2022. 1", + "[33] Nico Messikommer, Carter Fang, Mathias Gehrig, and Davide Scaramuzza. Data-driven feature tracking for event cameras. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5642-5651, 2023. 9", + "[34] Manasi Muglikar, Guillermo Gallego, and Davide Scaramuzza. Esl: Event-based structured light. In 2021 International Conference on 3D Vision (3DV), pages 1165-1174. IEEE, 2021. 1", + "[35] Seungjun Nah, Sungyong Baik, Seokil Hong, Gyeongsik Moon, Sanghyun Son, Radu Timofte, and Kyoung Mu Lee. NTIRE 2019 challenge on video deblurring and super-resolution: Dataset and study. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition workshops, pages 1996-2005, 2019. 8", + "[36] Bin Ren, Hang Guo, Lei Sun, Zongwei Wu, Radu Timofte, Yawei Li, et al. The tenth NTIRE 2025 efficient super-resolution challenge report. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[37] Nickolay Safonov, Alexey Bryntsev, Andrey Moskalenko, Dmitry Kulikov, Dmitriy Vatolin, Radu Timofte, et al. NTIRE 2025 challenge on UGC video enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[38] Timo Stoffregen, Cedric Scheerlinck, Davide Scaramuzza, Tom Drummond, Nick Barnes, Lindsay Kleeman, and Robert Mahony. Reducing the sim-to-real gap for event cameras. In European Conference on Computer Vision, pages 534-549, 2020. 8", + "[39] Lei Sun, Christos Sakaridis, Jingyun Liang, Qi Jiang, Kailun Yang, Peng Sun, Yaozu Ye, Kaiwei Wang, and Luc Van Gool. Event-based fusion for motion deblurring with cross-modal attention. In European Conference on Computer Vision, pages 412-428. Springer, 2022. 1, 3, 4, 6, 7, 8, 9, 10, 11, 12", + "[40] Lei Sun, Christos Sakaridis, Jingyun Liang, Peng Sun, Jiezhang Cao, Kai Zhang, Qi Jiang, Kaiwei Wang, and Luc Van Gool. Event-based frame interpolation with ad-hoc deblurring. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18043-18052, 2023. 2, 3, 4, 6, 7, 8, 9, 10", + "[41] Lei Sun, Daniel Gehrig, Christos Sakaridis, Mathias Gehrig, Jingyun Liang, Peng Sun, Zhijie Xu, Kaiwei Wang, Luc Van Gool, and Davide Scaramuzza. A unified framework for event-based frame interpolation with ad-hoc deblurring in the wild. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 1, 3", + "[42] Lei Sun, Andrea Alfarano, Peiqi Duan, Shaolin Su, Kaiwei Wang, Boxin Shi, Radu Timofte, Danda Pani Paudel, Luc" + ], + "bbox": [ + 91, + 90, + 480, + 900 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Van Gool, et al. NTIRE 2025 challenge on event-based image deblurring: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[43] Lei Sun, Yuhan Bao, Jiajun Zhai, Jingyun Liang, Yulun Zhang, Kaiwei Wang, Danda Pani Paudel, and Luc Van Gool. Low-light image enhancement using event-based illumination estimation. arXiv preprint arXiv:2504.09379, 2025. 1", + "[44] Lei Sun, Hang Guo, Bin Ren, Luc Van Gool, Radu Timofte, Yawei Li, et al. The tenth NTIRE 2025 image denoising challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[45] Zhijing Sun, Xueyang Fu, Longzhuo Huang, Aiping Liu, and Zheng-Jun Zha. Motion aware event representation-driven image deblurring. In European Conference on Computer Vision, pages 418-435. Springer, 2024. 4", + "[46] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Caitian Chen, Zongwei Wu, Radu Timofte, et al. NTIRE 2025 image shadow removal challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[47] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Zongwei Wu, Radu Timofte, et al. NTIRE 2025 ambient lighting normalization challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[48] Yingqian Wang, Zhengyu Liang, Fengyuan Zhang, Lvli Tian, Longguang Wang, Juncheng Li, Jungang Yang, Radu Timofte, Yulan Guo, et al. NTIRE 2025 challenge on light field image super-resolution: Methods and results. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[49] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE Transactions on Image Processing, 13(4):600-612, 2004. 5", + "[50] Wenming Weng, Yueyi Zhang, and Zhiwei Xiong. Event-based blurry frame interpolation under blind exposure. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1588-1598, 2023. 6", + "[51] Kangning Yang, Jie Cai, Ling Ouyang, Florin-Alexandru Vasluianu, Radu Timofte, Jiaming Ding, Huiming Sun, Lan Fu, Jinlong Li, Chiu Man Ho, Zibo Meng, et al. NTIRE 2025 challenge on single image reflection removal in the wild: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[52] Wen Yang, Jinjian Wu, Jupo Ma, Leida Li, and Guangming Shi. Motion deblurring via spatial-temporal collaboration of frames and events. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 6531-6539, 2024. 6", + "[53] Pierluigi Zama Ramirez, Fabio Tosi, Luigi Di Stefano, Radu Timofte, Alex Costanzino, Matteo Poggi, Samuele Salti, Stefano Mattoccia, et al. NTIRE 2025 challenge on hr depth from images of specular and transparent surfaces. In Pro" + ], + "bbox": [ + 516, + 90, + 906, + 900 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "ceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2", + "[54] Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, Ming-Hsuan Yang, and Ling Shao. Learning enriched features for real image restoration and enhancement. In European Conference on Computer Vision, pages 492-511. Springer, 2020. 5", + "[55] Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, and Ming-Hsuan Yang. Restormer: Efficient transformer for high-resolution image restoration. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5728-5739, 2022. 4, 5, 8, 9", + "[56] Shaobo Zhang, Lei Sun, and Kaiwei Wang. A multi-scale recurrent framework for motion segmentation with event camera. IEEE Access, 11:80105-80114, 2023. 1", + "[57] Yulun Zhang, Yapeng Tian, Yu Kong, Bineng Zhong, and Yun Fu. Residual dense network for image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2472-2481, 2018. 
7" + ], + "bbox": [ + 91, + 90, + 482, + 373 + ], + "page_idx": 17 + } +] \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12401/344c7717-406a-4426-bbde-928913ffd40c_model.json b/data/2025/2504_12xxx/2504.12401/344c7717-406a-4426-bbde-928913ffd40c_model.json new file mode 100644 index 0000000000000000000000000000000000000000..e8f573bc2928baaf8fda0c0805a4cb412fb6eac2 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/344c7717-406a-4426-bbde-928913ffd40c_model.json @@ -0,0 +1,5450 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.261, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.12401v1 [cs.CV] 16 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.093, + 0.131, + 0.905, + 0.153 + ], + "angle": 0, + "content": "NTIRE 2025 Challenge on Event-Based Image Deblurring: Methods and Results" + }, + { + "type": "text", + "bbox": [ + 0.145, + 0.182, + 0.22, + 0.198 + ], + "angle": 0, + "content": "Lei Sun*" + }, + { + "type": "text", + "bbox": [ + 0.128, + 0.2, + 0.216, + 0.215 + ], + "angle": 0, + "content": "Boxin Shi*" + }, + { + "type": "text", + "bbox": [ + 0.266, + 0.183, + 0.406, + 0.198 + ], + "angle": 0, + "content": "Andrea Alfarano*" + }, + { + "type": "text", + "bbox": [ + 0.258, + 0.2, + 0.376, + 0.215 + ], + "angle": 0, + "content": "Radu Timofte*" + }, + { + "type": "text", + "bbox": [ + 0.451, + 0.183, + 0.548, + 0.199 + ], + "angle": 0, + "content": "Peiqi Duan*" + }, + { + "type": "text", + "bbox": [ + 0.417, + 0.2, + 0.575, + 0.216 + ], + "angle": 0, + "content": "Danda Pani Paudel*" + }, + { + "type": "text", + "bbox": [ + 0.592, + 0.182, + 0.688, + 0.198 + ], + "angle": 0, + "content": "Shaolin Su*" + }, + { + "type": "text", + "bbox": [ + 0.616, + 0.2, + 0.734, + 0.216 + ], + "angle": 0, + "content": "Luc Van Gool*" + }, + { + "type": "text", + "bbox": [ + 0.732, + 0.182, + 0.849, + 0.199 + ], + "angle": 0, + "content": "Kaiwei Wang*" + }, + { + "type": "text", + "bbox": [ + 0.776, + 0.2, + 0.871, + 0.216 + ], + "angle": 0, + "content": "Qinglin Liu" + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.217, + 0.169, + 0.233 + ], + "angle": 0, + "content": "Wei Yu" + }, + { + "type": "text", + "bbox": [ + 0.206, + 0.217, + 0.307, + 0.234 + ], + "angle": 0, + "content": "Xiaogian Lv" + }, + { + "type": "text", + "bbox": [ + 0.344, + 0.218, + 0.414, + 0.234 + ], + "angle": 0, + "content": "Lu Yang" + }, + { + "type": "text", + "bbox": [ + 0.45, + 0.218, + 0.568, + 0.234 + ], + "angle": 0, + "content": "Shuigen Wang" + }, + { + "type": "text", + "bbox": [ + 0.606, + 0.218, + 0.747, + 0.235 + ], + "angle": 0, + "content": "Shengping Zhang" + }, + { + "type": "text", + "bbox": [ + 0.782, + 0.218, + 0.89, + 0.234 + ], + "angle": 0, + "content": "Xiangyang Ji" + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.235, + 0.205, + 0.251 + ], + "angle": 0, + "content": "Long Bao" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.235, + 0.307, + 0.252 + ], + "angle": 0, + "content": "Yuqiang" + }, + { + "type": "text", + "bbox": [ + 0.397, + 0.235, + 0.486, + 0.252 + ], + "angle": 0, + "content": "Jinao Song" + }, + { + "type": "text", + "bbox": [ + 0.524, + 0.235, + 0.614, + 0.252 + ], + "angle": 0, + "content": "Ziyi Wang" + }, + { + "type": "text", + "bbox": [ + 0.654, + 0.235, + 0.753, + 0.252 + ], + "angle": 0, + "content": "Shuang Wen" + }, + { + "type": "text", + "bbox": [ + 0.79, + 0.235, + 0.874, + 0.252 + ], + "angle": 0, + "content": "Heng Sun" + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.253, + 0.185, + 0.268 + ], + 
"angle": 0, + "content": "Kean Liu" + }, + { + "type": "text", + "bbox": [ + 0.222, + 0.253, + 0.359, + 0.27 + ], + "angle": 0, + "content": "Mingchen Zhong" + }, + { + "type": "text", + "bbox": [ + 0.397, + 0.253, + 0.486, + 0.269 + ], + "angle": 0, + "content": "Senyan Xu" + }, + { + "type": "text", + "bbox": [ + 0.522, + 0.253, + 0.619, + 0.269 + ], + "angle": 0, + "content": "Zhijing Sun" + }, + { + "type": "text", + "bbox": [ + 0.653, + 0.253, + 0.752, + 0.269 + ], + "angle": 0, + "content": "Jiaying Zhu" + }, + { + "type": "text", + "bbox": [ + 0.788, + 0.253, + 0.872, + 0.269 + ], + "angle": 0, + "content": "Chengjie 6" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.271, + 0.237, + 0.288 + ], + "angle": 0, + "content": "Xingbo Wang" + }, + { + "type": "text", + "bbox": [ + 0.278, + 0.271, + 0.345, + 0.287 + ], + "angle": 0, + "content": "Yidi Liu" + }, + { + "type": "text", + "bbox": [ + 0.485, + 0.271, + 0.583, + 0.287 + ], + "angle": 0, + "content": "Xueyang Fu" + }, + { + "type": "text", + "bbox": [ + 0.623, + 0.271, + 0.745, + 0.287 + ], + "angle": 0, + "content": "Zheng-Jun Zha" + }, + { + "type": "text", + "bbox": [ + 0.786, + 0.271, + 0.872, + 0.286 + ], + "angle": 0, + "content": "Dawei Fan" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.289, + 0.226, + 0.304 + ], + "angle": 0, + "content": "Dafeng Zhang" + }, + { + "type": "text", + "bbox": [ + 0.265, + 0.289, + 0.354, + 0.305 + ], + "angle": 0, + "content": "Yong Yang" + }, + { + "type": "text", + "bbox": [ + 0.392, + 0.289, + 0.479, + 0.305 + ], + "angle": 0, + "content": "Siru Zhang" + }, + { + "type": "text", + "bbox": [ + 0.519, + 0.289, + 0.634, + 0.305 + ], + "angle": 0, + "content": "Qinghua Yang" + }, + { + "type": "text", + "bbox": [ + 0.671, + 0.289, + 0.755, + 0.305 + ], + "angle": 0, + "content": "Hao Kang" + }, + { + "type": "text", + "bbox": [ + 0.791, + 0.289, + 0.888, + 0.305 + ], + "angle": 0, + "content": "Huiyuan Fu" + }, + { + "type": "text", + "bbox": [ + 0.157, + 0.306, + 0.257, + 0.322 + ], + "angle": 0, + "content": "Heng Zhang" + }, + { + "type": "text", + "bbox": [ + 0.303, + 0.306, + 0.385, + 0.322 + ], + "angle": 0, + "content": "Hongyuan" + }, + { + "type": "text", + "bbox": [ + 0.458, + 0.306, + 0.58, + 0.322 + ], + "angle": 0, + "content": "Zhijuan Huang" + }, + { + "type": "text", + "bbox": [ + 0.625, + 0.306, + 0.731, + 0.322 + ], + "angle": 0, + "content": "Shuoyan Wei" + }, + { + "type": "text", + "bbox": [ + 0.777, + 0.306, + 0.842, + 0.322 + ], + "angle": 0, + "content": "Feng Li" + }, + { + "type": "text", + "bbox": [ + 0.119, + 0.324, + 0.233, + 0.34 + ], + "angle": 0, + "content": "Runmin Cong" + }, + { + "type": "text", + "bbox": [ + 0.272, + 0.324, + 0.357, + 0.34 + ], + "angle": 0, + "content": "Weiqi Luo" + }, + { + "type": "text", + "bbox": [ + 0.399, + 0.324, + 0.503, + 0.339 + ], + "angle": 0, + "content": "Mingyun Lin" + }, + { + "type": "text", + "bbox": [ + 0.543, + 0.324, + 0.651, + 0.339 + ], + "angle": 0, + "content": "Chenxu Jiang" + }, + { + "type": "text", + "bbox": [ + 0.691, + 0.324, + 0.878, + 0.339 + ], + "angle": 0, + "content": "Hongyi Liu Lei Yu" + }, + { + "type": "text", + "bbox": [ + 0.127, + 0.341, + 0.207, + 0.356 + ], + "angle": 0, + "content": "Weilun Li" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.341, + 0.337, + 0.357 + ], + "angle": 0, + "content": "Jiajun Zhai" + }, + { + "type": "text", + "bbox": [ + 0.399, + 0.341, + 0.478, + 0.357 + ], + "angle": 0, + "content": "ngting Lin" + }, + { + "type": "text", + "bbox": [ + 0.519, + 
0.341, + 0.611, + 0.357 + ], + "angle": 0, + "content": "Shuang Ma" + }, + { + "type": "text", + "bbox": [ + 0.652, + 0.342, + 0.725, + 0.356 + ], + "angle": 0, + "content": "Sai Zhou" + }, + { + "type": "text", + "bbox": [ + 0.764, + 0.341, + 0.872, + 0.356 + ], + "angle": 0, + "content": "Zhanwen Liu" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.359, + 0.222, + 0.375 + ], + "angle": 0, + "content": "Yang Wang" + }, + { + "type": "text", + "bbox": [ + 0.263, + 0.359, + 0.366, + 0.375 + ], + "angle": 0, + "content": "Eiffel Chong" + }, + { + "type": "text", + "bbox": [ + 0.408, + 0.359, + 0.535, + 0.374 + ], + "angle": 0, + "content": "Nuwan Bandara" + }, + { + "type": "text", + "bbox": [ + 0.574, + 0.359, + 0.718, + 0.375 + ], + "angle": 0, + "content": "Thivya Kandappu" + }, + { + "type": "text", + "bbox": [ + 0.761, + 0.359, + 0.87, + 0.374 + ], + "angle": 0, + "content": "Archan Misra" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.376, + 0.264, + 0.392 + ], + "angle": 0, + "content": "Yihang Chen" + }, + { + "type": "text", + "bbox": [ + 0.309, + 0.376, + 0.374, + 0.392 + ], + "angle": 0, + "content": "Zhan Li" + }, + { + "type": "text", + "bbox": [ + 0.421, + 0.376, + 0.523, + 0.392 + ], + "angle": 0, + "content": "Weijun Yuan" + }, + { + "type": "text", + "bbox": [ + 0.57, + 0.376, + 0.696, + 0.393 + ], + "angle": 0, + "content": "Wenzhuo Wang" + }, + { + "type": "text", + "bbox": [ + 0.741, + 0.376, + 0.84, + 0.392 + ], + "angle": 0, + "content": "Boyang Yao" + }, + { + "type": "text", + "bbox": [ + 0.12, + 0.394, + 0.235, + 0.41 + ], + "angle": 0, + "content": "Zhanglu Chen" + }, + { + "type": "text", + "bbox": [ + 0.274, + 0.394, + 0.361, + 0.41 + ], + "angle": 0, + "content": "Yijing Sun" + }, + { + "type": "text", + "bbox": [ + 0.401, + 0.394, + 0.507, + 0.41 + ], + "angle": 0, + "content": "Tianjiao Wan" + }, + { + "type": "text", + "bbox": [ + 0.546, + 0.394, + 0.634, + 0.41 + ], + "angle": 0, + "content": "Zijian Gao" + }, + { + "type": "text", + "bbox": [ + 0.674, + 0.394, + 0.77, + 0.41 + ], + "angle": 0, + "content": "Qisheng Xu" + }, + { + "type": "text", + "bbox": [ + 0.81, + 0.394, + 0.877, + 0.409 + ], + "angle": 0, + "content": "Kele Xu" + }, + { + "type": "text", + "bbox": [ + 0.148, + 0.412, + 0.255, + 0.428 + ], + "angle": 0, + "content": "Yukun Zhang" + }, + { + "type": "text", + "bbox": [ + 0.299, + 0.412, + 0.351, + 0.427 + ], + "angle": 0, + "content": "Yu He" + }, + { + "type": "text", + "bbox": [ + 0.395, + 0.412, + 0.496, + 0.428 + ], + "angle": 0, + "content": "Xiaoyan Xie" + }, + { + "type": "text", + "bbox": [ + 0.539, + 0.412, + 0.597, + 0.427 + ], + "angle": 0, + "content": "Tao Fu" + }, + { + "type": "text", + "bbox": [ + 0.64, + 0.412, + 0.852, + 0.427 + ], + "angle": 0, + "content": "Yashu Gautamkumar Patel" + }, + { + "type": "text", + "bbox": [ + 0.155, + 0.429, + 0.305, + 0.445 + ], + "angle": 0, + "content": "Vihar Ramesh Jain" + }, + { + "type": "text", + "bbox": [ + 0.35, + 0.429, + 0.465, + 0.444 + ], + "angle": 0, + "content": "Divesh Basina" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.429, + 0.617, + 0.444 + ], + "angle": 0, + "content": "Rishik Ashili" + }, + { + "type": "text", + "bbox": [ + 0.662, + 0.429, + 0.843, + 0.446 + ], + "angle": 0, + "content": "Manish Kumar Manjhi" + }, + { + "type": "text", + "bbox": [ + 0.15, + 0.447, + 0.268, + 0.462 + ], + "angle": 0, + "content": "Sourav Kumar" + }, + { + "type": "text", + "bbox": [ + 0.311, + 0.447, + 0.422, + 0.463 + ], + "angle": 0, + "content": "Prinon Benny" + }, 
+ { + "type": "text", + "bbox": [ + 0.466, + 0.447, + 0.634, + 0.462 + ], + "angle": 0, + "content": "Himanshu Ghunawat" + }, + { + "type": "text", + "bbox": [ + 0.677, + 0.447, + 0.848, + 0.462 + ], + "angle": 0, + "content": "B Sri Sairam Gautam" + }, + { + "type": "text", + "bbox": [ + 0.31, + 0.465, + 0.432, + 0.481 + ], + "angle": 0, + "content": "Anett Varghese" + }, + { + "type": "text", + "bbox": [ + 0.503, + 0.465, + 0.635, + 0.479 + ], + "angle": 0, + "content": "Abhishek Yadav" + }, + { + "type": "title", + "bbox": [ + 0.249, + 0.516, + 0.326, + 0.531 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.551, + 0.484, + 0.778 + ], + "angle": 0, + "content": "This paper presents an overview of NTIRE 2025 the First Challenge on Event-Based Image Deblurring, detailing the proposed methodologies and corresponding results. The primary goal of the challenge is to design an event-based method that achieves high-quality image deblurring, with performance quantitatively assessed using Peak Signal-to-Noise Ratio (PSNR). Notably, there are no restrictions on computational complexity or model size. The task focuses on leveraging both events and images as inputs for single-image deblurring. A total of 199 participants registered, among whom 15 teams successfully submitted valid results, offering valuable insights into the current state of event-based image deblurring. We anticipate that this challenge will drive further advancements in event-based vision research." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.516, + 0.645, + 0.531 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.545, + 0.907, + 0.652 + ], + "angle": 0, + "content": "Traditional camera output frames with relatively long exposure time in a fixed framerate. In contrast, event cameras, a kind of neuromorphic sensor, asynchronously capture pixelwise intensity changes with high temporal resolution [12], and have been applied in various fields such as computational imaging [32, 39-41, 43], human pose estimation [2], depth estimation [30, 34], image segmentation [1, 56], etc." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.67, + 0.907, + 0.791 + ], + "angle": 0, + "content": "In recent years, significant efforts have been dedicated to event-based image restoration. Among various tasks, event-based image deblurring has gained the most attention, as the high temporal resolution of event cameras provides valuable priors for motion deblurring [39-41]. Notably, these methods operate under the assumption that input images and events are spatially aligned—a condition that applies to all approaches discussed in this paper." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.795, + 0.907, + 0.902 + ], + "angle": 0, + "content": "In conjunction with the NTIRE 2025 Workshop on New Trends in Image Restoration and Enhancement, the Event-Based Image Deblurring Challenge was organized. The objective is to develop a network architecture or solution that effectively integrates events and images to enhance image deblurring performance. We hope that this challenge will serve as a starting point for promoting event-based image" + }, + { + "type": "page_footnote", + "bbox": [ + 0.09, + 0.815, + 0.483, + 0.864 + ], + "angle": 0, + "content": "* L. Sun (lei.sun@insait.ai, INSAIT, Sofia University \"St. Kliment Ohridski\"), A. Alfarano, P. Duan, S. Su, K. Wang, B. Shi, R. Timofte, D. P. Paudel, and L. 
Van Gool were the challenge organizers, while the other authors participated in the challenge." + }, + { + "type": "page_footnote", + "bbox": [ + 0.093, + 0.865, + 0.389, + 0.876 + ], + "angle": 0, + "content": "Appendix A contains the authors' teams and affiliations." + }, + { + "type": "page_footnote", + "bbox": [ + 0.093, + 0.877, + 0.366, + 0.888 + ], + "angle": 0, + "content": "NTIRE 2025 webpage: https://cvlai.net/tnire/2025/." + }, + { + "type": "page_footnote", + "bbox": [ + 0.093, + 0.889, + 0.478, + 0.9 + ], + "angle": 0, + "content": "Code: https://github.com/AHupuJR/NTIRE2025_EventDeblur_challenge." + }, + { + "type": "list", + "bbox": [ + 0.09, + 0.815, + 0.483, + 0.9 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.121 + ], + "angle": 0, + "content": "enhancement on a broader stage and contribute to the thriving development of the event-based vision community." + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.123, + 0.483, + 0.394 + ], + "angle": 0, + "content": "This challenge is one of the NTIRE 2025 Workshop associated challenges on: ambient lighting normalization [47], reflection removal in the wild [51], shadow removal [46], event-based image deblurring [42], image denoising [44], XGC quality assessment [27], UGC video enhancement [37], night photography rendering [10], image super-resolution (x4) [4], real-world face restoration [5], efficient super-resolution [36], HR depth estimation [53], efficient burst HDR and restoration [19], cross-domain few-shot object detection [11], short-form UGC video quality assessment and enhancement [22, 23], text to image generation model quality assessment [13], day and night raindrop removal for dual-focused images [21], video quality assessment for video conferencing [16], low light image enhancement [28], light field super-resolution [48], restore any image model (RAIM) in the wild [25], raw restoration and super-resolution [7] and raw reconstruction from RGB on smartphones [8]." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.409, + 0.483, + 0.445 + ], + "angle": 0, + "content": "2. NTIRE 2025 Event-Based Image Deblurring Challenge" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.453, + 0.483, + 0.558 + ], + "angle": 0, + "content": "The goals of this challenge include: (1) promoting research in the area of event-based image deblurring, (2) facilitating comparisons between various methods, and (3) providing a platform for academic and industrial participants to engage, discuss, and potentially establish collaborations. This section delves into the specifics of the challenge, including the dataset, challenge phases and evaluation criteria." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.57, + 0.187, + 0.584 + ], + "angle": 0, + "content": "2.1. Dataset" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.592, + 0.483, + 0.683 + ], + "angle": 0, + "content": "The HighREV dataset [40] is used for both training and evaluation in this challenge. It consists of 1,771 sets of blurry images, corresponding events, and sharp images for training. Additionally, 421 sets are provided as validation data during the development phase, ensuring a comprehensive benchmark for assessing model performance." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.694, + 0.314, + 0.71 + ], + "angle": 0, + "content": "2.2. 
Tracks and Competition" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.716, + 0.483, + 0.762 + ], + "angle": 0, + "content": "The aim is to obtain a network design capable to produce high-quality results with the best performance measured by PSNR for event-based image deblurring." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.771, + 0.483, + 0.877 + ], + "angle": 0, + "content": "Challenge phases Participants were given access to training images from the HighREV dataset. During the validation phase, they could use 421 images from the validation set for model tuning. In the test phase, evaluation was performed on 271 images from the test set. To ensure a fair assessment, the ground-truth images for the test phase remained hidden from participants throughout the challenge." + }, + { + "type": "table", + "bbox": [ + 0.527, + 0.089, + 0.897, + 0.333 + ], + "angle": 0, + "content": "
Team | Rank | PSNR (primary) | SSIM
IVISLAB | 1 | 42.79 | 0.9196
MiVideoDeblur | 2 | 42.70 | 0.9281
404NotFound | 3 | 42.09 | 0.9300
Give_it_a_try | 4 | 40.37 | 0.9234
BUPTMM | 5 | 40.21 | 0.9179
WEI | 6 | 39.46 | 0.9171
DVS-WHU | 7 | 39.26 | 0.9101
PixelRevive | 8 | 39.12 | 0.9112
CHD | 9 | 38.56 | 0.9055
SMU | 10 | 38.30 | 0.9047
JNU620 | 11 | 37.63 | 0.9019
colab | 12 | 36.84 | 0.8962
CMSL | 13 | 31.81 | 0.8900
KUnet | 14 | 29.42 | 0.8600
Group10 | 15 | 25.93 | 0.8200
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.345, + 0.905, + 0.4 + ], + "angle": 0, + "content": "Table 1. Results of NTIRE 2025 Event-Based Image Deblurring Challenge. PSNR and SSIM scores are measured on the 271 test images from HighREV dataset. Team rankings are based primarily on PSNR." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.424, + 0.905, + 0.547 + ], + "angle": 0, + "content": "Evaluation protocol Since the aim of this challenge is to foster the development of accurate event-based image deblurring networks, PSNR and SSIM on the 271 testing images are used as the quantitative evaluation metrics. A code example for calculating these metrics is available at https://github.com/AHupuJR/NTIRE2025_EventDeblurChallenge. The code of the submitted solutions and the pretrained weights are also available in this repository." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.558, + 0.689, + 0.574 + ], + "angle": 0, + "content": "3. Challenge Results" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.583, + 0.905, + 0.675 + ], + "angle": 0, + "content": "Table 1 shows the final rankings and test results of the participated teams. The implementation details of each team can be found in Sec.4, while team member information can be found in Appendix A. IVISLAB achieved the first place in terms of PSNR, followed by MiVideoDeblur and 404NotFound as the second and third place, respectively." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.681, + 0.645, + 0.697 + ], + "angle": 0, + "content": "3.1. Participants" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.703, + 0.905, + 0.734 + ], + "angle": 0, + "content": "The challenge attracted 199 registered participants, with 15 teams successfully submitting valid results." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.741, + 0.779, + 0.756 + ], + "angle": 0, + "content": "3.2. Main Ideas and Architectures" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.763, + 0.905, + 0.824 + ], + "angle": 0, + "content": "Throughout the challenge, participants explored various innovative techniques to improve deblurring performance. Below, we summarize some of the key strategies employed by the top-performing teams." + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.826, + 0.905, + 0.901 + ], + "angle": 0, + "content": "1. Hybrid architectures demonstrated strong performance, with all top-3 teams utilizing a combination of transformers and convolutional networks. This approach leverages global features extracted by transformers alongside local features captured by convolutional" + }, + { + "type": "footer", + "bbox": [ + 0.115, + 0.888, + 0.293, + 0.9 + ], + "angle": 0, + "content": "https://www.cvlai.net/ntire/2025/" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.11, + 0.092, + 0.483, + 0.152 + ], + "angle": 0, + "content": "layers, both of which contribute to effective event-based image deblurring. Besides, both spatial and channel attention mechanisms play a crucial role in enhancing overall performance." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.153, + 0.483, + 0.227 + ], + "angle": 0, + "content": "2. Pretrained weights matters. The winning team, IVISLAB, leveraged a backbone model initialized with pretrained weights from ImageNet, demonstrating the advantages of transfer learning in event-based image deblurring." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.228, + 0.483, + 0.287 + ], + "angle": 0, + "content": "3. Cross-modal fusion proves beneficial. 
Several teams adopted EFNet [39] and REFID [40, 41] as a baseline model to fuse features from the event and image branches." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.288, + 0.483, + 0.363 + ], + "angle": 0, + "content": "4. Effective training strategies. Both the second and third-place teams employed progressive learning techniques during training. Additionally, the winning team utilized a large patch size \\((512 \\times 512)\\), which contributed to improved performance." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.364, + 0.483, + 0.453 + ], + "angle": 0, + "content": "5. Incorporating a novel Mamba-based architecture. Integrating features from both image and event modalities is crucial for enhancing the reconstruction quality of event-based deblurring methods. Team DVS-WHU introduced an innovative Mamba-based architecture to achieve more effective fusion." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.153, + 0.483, + 0.453 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.462, + 0.192, + 0.477 + ], + "angle": 0, + "content": "3.3. Fairness" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.484, + 0.483, + 0.651 + ], + "angle": 0, + "content": "To maintain fairness in the event-based image deblurring challenge, specific rules were implemented, primarily regarding the datasets used for training. Participants were permitted to use external datasets for training. However, incorporating the HighREV validation set, whether sharp or blurry images, was strictly prohibited, as this set served to evaluate the overall performance and generalizability of the models. Additionally, the use of HighREV test blurry images for training was not allowed. On the other hand, employing advanced data augmentation techniques during training was considered an acceptable practice." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.663, + 0.373, + 0.68 + ], + "angle": 0, + "content": "4. Challenge Methods and Teams" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.687, + 0.203, + 0.702 + ], + "angle": 0, + "content": "4.1. IVISLAB" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.709, + 0.483, + 0.846 + ], + "angle": 0, + "content": "To achieve image deblurring, team IVISLAB introduces the Triple Event-stream Image Deblurring Network (TEIDNet). As depicted in Figure 1, TEIDNet converts consecutive events into event voxels at three temporal scales to perceive motion information from blur images and capture fine edges for reconstructing clear images. Furthermore, TEIDNet integrates Shift Window Attention and Channel-Wise Attention blocks to capture local and global contexts, thereby enhancing deblurring accuracy." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.852, + 0.291, + 0.866 + ], + "angle": 0, + "content": "4.1.1. Network Architecture" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.483, + 0.901 + ], + "angle": 0, + "content": "TEIDNet adopts an encoder-decoder architecture to process images and triple-stream event voxels, aiming to estimate" + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.091, + 0.903, + 0.219 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.23, + 0.905, + 0.256 + ], + "angle": 0, + "content": "Figure 1. The model architecture of TEIDNet, proposed by Team IVISLAB." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.284, + 0.907, + 0.692 + ], + "angle": 0, + "content": "the deblurred image. 
Specifically, when deblurring the image at frame \\( t \\), TEIDNet considers that the long-term event stream surrounding frame \\( t \\) can aid in motion perception. Therefore, it voxelizes the event data from frame \\( t - T_{l} \\) to frame \\( t + T_{l} \\) into a \\( b \\)-bin event voxel \\( V_{l,t} \\). Simultaneously, since the short-term event stream around frame \\( t \\) can help reconstruct high-frequency textures, TEIDNet voxelizes the event data from frame \\( t - T_{s} \\) to frame \\( t + T_{s} \\) into a \\( b \\)-bin event voxel \\( V_{s,t} \\). Furthermore, to mitigate color artifacts by leveraging higher-resolution motion information near the current frame, TEIDNet voxelizes the event data from frame \\( t - T_{m} \\) to frame \\( t + T_{m} \\) into a \\( b \\)-bin event voxel \\( V_{m,t} \\). Subsequently, the event voxels \\( V_{l,t}, V_{s,t} \\), and \\( V_{m,t} \\), along with the blur image \\( I_{b} \\), are concatenated and fed into the network. To effectively fuse the features from the image and event voxels, TEIDNet employs convolutional layers to generate fused feature representations. The network then utilizes a dual-branch encoder. The first, a complex branch extracts high-level semantic information from the fused features by leveraging shift window attention to capture local context and channel-wise attention blocks to capture global context. The second, a simple branch utilizes convolutional layers to capture fine-grained details from the fused features. Next, TEIDNet's decoder integrates multiple shift window attention blocks to fuse and upsample the features extracted by the dual-branch encoder. Finally, convolutional layers are employed to predict the deblurred image \\( I_{t} \\)." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.698, + 0.659, + 0.712 + ], + "angle": 0, + "content": "4.1.2. Loss Function" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.717, + 0.905, + 0.748 + ], + "angle": 0, + "content": "To train TEIDNet, they define a reconstruction loss \\(\\mathcal{L}_r\\) for the estimated deblurred image \\(I_{t}\\) as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.586, + 0.757, + 0.905, + 0.775 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {r} = \\lambda_ {1} \\mathrm {L} _ {1} \\left(I _ {t}, I _ {t} ^ {g t}\\right) + \\lambda_ {2} \\mathrm {L} _ {2} \\left(I _ {t}, I _ {t} ^ {g t}\\right) \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.785, + 0.905, + 0.845 + ], + "angle": 0, + "content": "Here, \\(\\lambda_{1}\\) and \\(\\lambda_{2}\\) are coefficients that balance the loss terms. The function \\(\\mathrm{L}_1(\\cdot ,\\cdot)\\) represents the mean absolute error, while \\(\\mathrm{L}_2(\\cdot ,\\cdot)\\) denotes the mean squared error. The term \\(I_t^{gt}\\) refers to the ground truth image at frame \\(t\\)" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.852, + 0.724, + 0.867 + ], + "angle": 0, + "content": "4.1.3. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.871, + 0.907, + 0.901 + ], + "angle": 0, + "content": "TEIDNet is implemented using PyTorch on four Nvidia L20 GPUs. During training, a batch size of 16 is utilized, with" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.095, + 0.089, + 0.49, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.271, + 0.483, + 0.299 + ], + "angle": 0, + "content": "Figure 2. The framework of DASTF-Net, proposed by Team MiVideoDeblur." 
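To make Eq. (1) concrete, here is a minimal PyTorch sketch of TEIDNet's reconstruction loss; lam1 and lam2 stand for \(\lambda_1\) and \(\lambda_2\) (both reported as 1), and the function itself is an illustration rather than the team's actual code:

    import torch.nn.functional as F

    def reconstruction_loss(pred, gt, lam1=1.0, lam2=1.0):
        # Eq. (1): weighted sum of the mean absolute error (L1)
        # and the mean squared error (L2) against the ground truth.
        return lam1 * F.l1_loss(pred, gt) + lam2 * F.mse_loss(pred, gt)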
+ }, + { + "type": "text", + "bbox": [ + 0.089, + 0.326, + 0.483, + 0.538 + ], + "angle": 0, + "content": "input data dimensions of \\(512 \\times 512\\) pixels. The network weights are optimized over 1000 epochs using the AdamW optimizer, with an initial learning rate set to \\(2 \\times 10^{-5}\\). A cosine annealing scheduler is employed to decay the learning rate progressively. In addition, they take the checkpoint with good performance and perform a second finetune. To mitigate overfitting, data augmentation techniques such as random flipping and rotation are applied. They also initialize the backbone network parameters using weights pretrained on ImageNet. The specific coefficients and parameters are defined as follows: number of bins \\(b = 7\\), long-term temporal window \\(T_{l} = 5\\), medium-term temporal window \\(T_{m} = 1\\), short-term temporal window \\(T_{s} = 0\\), and loss function weights \\(\\lambda_{1} = 1\\), \\(\\lambda_{2} = 1\\)." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.547, + 0.248, + 0.562 + ], + "angle": 0, + "content": "4.2. MiVideoDeblur" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.569, + 0.483, + 0.795 + ], + "angle": 0, + "content": "Introduction. As illustrated in Fig. 2, their team proposed the Dual Attention Spatio-Temporal Fusion Network(DASTF-Net). Motivated by EFNet [39], their model employs a two-stage encoder-decoder architecture. Initially, two encoders separately extract multi-scale features from both the image and event data. Based on the EGACA module [40] and the FAF module [45], they have designed the Temporal Fusion Residual Block (TFRB) and Multi-Scale Cross-Attention Fusion Block (MSCAFB), which perform feature fusion in the temporal and spatial dimensions, respectively. By incorporating a dual-attention mechanism, these modules effectively enhance the model's performance. Following feature fusion, the fused features are fed into a Restormer [55], which further leverages the feature information to improve the model's performance." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.796, + 0.483, + 0.901 + ], + "angle": 0, + "content": "Training strategy. They employed a four-stage training strategy. In the first stage, the network was trained for 160k iterations using the PSNRLoss function. AdamW Optimizer was used, with an initial learning rate of 2e-4 and a cosine annealing learning rate schedule for updates. Subsequently, in the second stage, data augmentation techniques were introduced, which included adding random Gaussian" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.198 + ], + "angle": 0, + "content": "noise and applying random scaling to the input data. Building upon the model from the first stage, the training continued for 80k iterations with an initial learning rate of 1e-4. For the third and fourth stages, the patch size was progressively increased from 256 to 320 and then to 480. The network was trained for 40k iterations in the third stage and 45k iterations in the fourth stage." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.21, + 0.655, + 0.225 + ], + "angle": 0, + "content": "4.3. 404NotFound" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.233, + 0.907, + 0.475 + ], + "angle": 0, + "content": "Their team proposes EV-Deblurformer[26], a framework consisting of two complementary models designed to fully leverage the temporal dynamics of video sequences and the rich texture details present in single images. 
The framework includes two distinct components: Video-SFHformer, developed for video-based deblurring, and EFSformer, tailored for single-image deblurring. In Video-SFHformer, they introduce STFBlock to enhance the model's capacity for long-range temporal modeling. In EFSformer, they incorporate STEFusionBlock, which fuses event features from the frequency domain to improve spatial detail restoration. To achieve optimal performance, as shown in Section 4.3.3, a sequence-level ensemble strategy is employed to merge the outputs of both models. A progressive training scheme is also adopted to enhance robustness and effectiveness." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.486, + 0.673, + 0.501 + ], + "angle": 0, + "content": "4.3.1. Overall Pipeline" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.506, + 0.906, + 0.627 + ], + "angle": 0, + "content": "Figure 3 illustrates the overall architecture of their proposed method, EV-Deblurformer. This approach, built upon the two models: Video SFHformer and EFSformer, fully exploits the rich temporal dynamics and sharp edge information provided by event data. For the video deblurring model, they propose the Video-SFHformer based on SFHformer. For the single-image motion deblurring model, they propose the EFSformer built on EFNet[39]." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.638, + 0.724, + 0.654 + ], + "angle": 0, + "content": "4.3.2. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.658, + 0.906, + 0.748 + ], + "angle": 0, + "content": "They implement their proposed network via the PyTorch 2.1.2 platform. Adam optimizer with parameters \\(\\beta_{1} = 0.9\\) and \\(\\beta_{2} = 0.999\\) is adopted to optimize their network. Motivated by [55] they introduce the progressive training strategy. The training phase of their network could be divided into two stages:" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.75, + 0.907, + 0.902 + ], + "angle": 0, + "content": "(1) Initial training of EV-Deblurformer. They use a progressive training strategy at first. For the video-based motion deblurring model, they start training with patch size \\( 152 \\times 152 \\) with batch size of 16 for 250K iterations. The patch size and batch size pairs are updated to \\( [(192^2, 12), (256^2, 8), (304^2, 8)] \\) at iterations [250K, 200K, 150K]. The initial learning rate is \\( 2 \\times 10^{-4} \\) and remains unchanged when patch size is 192. Later, the learning rate is set to \\( 1 \\times 10^{-4} \\) and \\( 7 \\times 10^{-5} \\) for patch and batch size pairs of \\( (256^2, 8) \\) and \\( (304^2, 8) \\), respectively. They employ a" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.135, + 0.087, + 0.457, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.461, + 0.087, + 0.862, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.134, + 0.287, + 0.861, + 0.48 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.491, + 0.907, + 0.507 + ], + "angle": 0, + "content": "Figure 3. The architecture diagram of EV-Deblurformer, proposed by Team 404NotFound, is designed for event-guided motion deblurring." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.532, + 0.486, + 0.744 + ], + "angle": 0, + "content": "cosine annealing learning rate decay strategy, gradually reducing the learning rate. 
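The AdamW-plus-cosine-annealing recipe that recurs across these submissions can be sketched as follows; the stand-in model, learning rate, and iteration count are illustrative placeholders, not any team's exact configuration:

    import torch

    model = torch.nn.Conv2d(3, 3, 3, padding=1)  # stand-in for a deblurring network
    optimizer = torch.optim.AdamW(model.parameters(), lr=2e-4, betas=(0.9, 0.999))
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, T_max=250_000, eta_min=1e-7)

    for step in range(250_000):
        # ...forward pass and loss.backward() would go here...
        optimizer.step()
        optimizer.zero_grad()
        scheduler.step()  # decays the learning rate along a cosine curve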
For the single-image-based motion deblurring model, They begin training with a patch size of \\(192 \\times 192\\) and a batch size of 12 for 250K iterations. During training, patch size and batch size pairs are progressively updated to \\((256^{2}, 10)\\), \\((288^{2}, 8)\\), and \\((320^{2}, 8)\\) at 36K, 24K, and 24K iterations, respectively. The initial learning rate is set to \\(5 \\times 10^{-4}\\), and later adjusted to \\(1 \\times 10^{-4}\\), \\(7 \\times 10^{-5}\\), and \\(5 \\times 10^{-5}\\) corresponding to the updated patch and batch size configurations. A cosine annealing schedule is employed to gradually decay the learning rate throughout the training process. The first stage is performed on the NVIDIA RTX 4090 GPU. They obtain the best model at this stage as the initialization of the second stage." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.75, + 0.487, + 0.904 + ], + "angle": 0, + "content": "(2) Fine-tuning EV-Deblurformer. For the video-based motion deblurring model, they start training with a patch size of \\(320 \\times 320\\) and a batch size of 4 for 150K iterations. The initial learning rate is set to \\(1 \\times 10^{-5}\\) and is adjusted to \\(1 \\times 10^{-7}\\) using a cosine annealing schedule, over a total of 150K iterations. They use the entire training data from the challenge without applying any data augmentation techniques. The exponential moving average (EMA) is employed for the dynamic adjustment of the model parameters. For the single-image-based motion deblurring model, they" + }, + { + "type": "list", + "bbox": [ + 0.089, + 0.532, + 0.487, + 0.904 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.532, + 0.907, + 0.576 + ], + "angle": 0, + "content": "adopt the same training strategy as used in the video-based motion deblurring model. The second training stage is conducted on an NVIDIA RTX 4090 GPU." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.579, + 0.909, + 0.685 + ], + "angle": 0, + "content": "(3) Evaluation Metrics They utilize two widely adopted reference-based evaluation metrics—Peak Signal-to-Noise Ratio (PSNR) and Structural Similarity Index Measure (SSIM)[49]—to evaluate the effectiveness of their method, following prior works[3, 24, 54, 55]. Higher PSNR and SSIM values generally reflect better performance in image restoration tasks." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.697, + 0.704, + 0.714 + ], + "angle": 0, + "content": "4.3.3. Ensemble Strategies" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.718, + 0.907, + 0.795 + ], + "angle": 0, + "content": "Ensemble learning has been proven to be an effective technique in image restoration. Its most basic application involves integrating the outputs of multiple models and applying a fusion strategy to achieve results with better generalization and greater stability in restoration quality." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.796, + 0.909, + 0.901 + ], + "angle": 0, + "content": "The HighREV-test dataset consists of four sequences. Among them, one is an outdoor scene, which differs markedly from the other three in terms of object diversity, texture richness, and color composition. Based on this observation, they explore a sequence-level ensemble strategy that selectively exchanges outputs between Video-SFHformer and EFSformer." 
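The PSNR/SSIM evaluation described in (3) can be reproduced with scikit-image; this is a minimal sketch, and the scoring code in the official challenge repository remains the authoritative reference:

    import numpy as np
    from skimage.metrics import peak_signal_noise_ratio, structural_similarity

    def evaluate(pred: np.ndarray, gt: np.ndarray):
        # pred, gt: uint8 RGB images of identical shape.
        psnr = peak_signal_noise_ratio(gt, pred, data_range=255)
        ssim = structural_similarity(gt, pred, channel_axis=-1, data_range=255)
        return psnr, ssim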
+ } + ], + [ + { + "type": "image", + "bbox": [ + 0.111, + 0.089, + 0.465, + 0.159 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.168, + 0.483, + 0.21 + ], + "angle": 0, + "content": "Figure 4. An overview of the method proposed by Team BUPTMM: they set the weights for the fusion, with \\(\\alpha\\) set to 0.6 and \\(\\beta\\) to 0.4." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.243, + 0.484, + 0.351 + ], + "angle": 0, + "content": "Specifically, they start with the best-performing Video-SFHformer model and replace the output of the outdoor sequence in the HighREV-test set with the corresponding result generated by EFSformer. The results in Table 1 show that their approach yields the best performance, achieving the highest SSIM score and ranking third overall in the NTIRE Event-Based Image Deblurring Challenge." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.369, + 0.226, + 0.385 + ], + "angle": 0, + "content": "4.4. Give_it_a_try" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.395, + 0.251, + 0.409 + ], + "angle": 0, + "content": "4.4.1. General method" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.417, + 0.484, + 0.508 + ], + "angle": 0, + "content": "This submission is mainly based on the public code of another team. The models used in this submission are EFNet att track fusion and EFNet att track fusion new, which can be found at archs or archs/tested. They change the training strategy, finetune the models, and combine the two best models to push the limits of scoring." + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.517, + 0.484, + 0.592 + ], + "angle": 0, + "content": "- How event modality is utilized in the deblurring process: They used the given SCER format event voxels in training, validating and testing. The usage is the same as in the original EFNet [39], since the new networks retain the encoder module of the baseline." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.611, + 0.299, + 0.626 + ], + "angle": 0, + "content": "4.4.2. Implementation details" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.63, + 0.172, + 0.644 + ], + "angle": 0, + "content": "- Training:" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.645, + 0.483, + 0.84 + ], + "angle": 0, + "content": "In the first stage of training, all models are trained for \\(2 \\times 10^{5}\\) iterations with a batch size of 16 using the PSNR loss function and the AdamW optimizer. In each training batch, each pair of images and event voxels is randomly cropped to \\(256 \\times 256\\) and augmented by random flipping and rotation. The learning rate is initialized as \\(3 \\times 10^{-4}\\), and a cosine annealing scheduler is used to decay it to a final value of \\(10^{-7}\\). They finetuned the models obtained from the first stage with a patch size of \\(512 \\times 512\\). At this stage, all models are trained for another \\(2 \\times 10^{5}\\) iterations with a batch size of 4, and the learning rate drops from \\(2 \\times 10^{-5}\\) to \\(10^{-6}\\). Models are validated every \\(10^{4}\\) iterations. Other settings remain unchanged." + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.841, + 0.262, + 0.856 + ], + "angle": 0, + "content": "- Validating and Testing:" + }, + { + "type": "text", + "bbox": [ + 0.103, + 0.856, + 0.483, + 0.901 + ], + "angle": 0, + "content": "They chose the best-validated model for each network from the fine-tuning stage and average the two models' outputs as the final result to improve robustness."
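The two-checkpoint averaging just described reduces, in code, to a mean over per-model outputs; a minimal sketch, assuming each model maps a (blurry image, event voxel) pair to a deblurred tensor:

    import torch

    def ensemble_average(blurry, voxel, models):
        # Run each fine-tuned checkpoint and average the deblurred outputs.
        with torch.no_grad():
            outputs = [m(blurry, voxel) for m in models]
        return torch.stack(outputs).mean(dim=0)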
+ }, + { + "type": "title", + "bbox": [ + 0.514, + 0.091, + 0.634, + 0.105 + ], + "angle": 0, + "content": "4.5. BUPTMM" + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.114, + 0.648, + 0.128 + ], + "angle": 0, + "content": "4.5.1. Architecture" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.134, + 0.906, + 0.21 + ], + "angle": 0, + "content": "Their solution is built on EFNet [39] and STCNet [52]. Inspired by [50], they introduce a detail enhancement module that follows the EFNet prediction stage. The whole pipeline is illustrated in Fig. 4. The detail enhancement module adopts a simple U-Net structure." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.221, + 0.724, + 0.235 + ], + "angle": 0, + "content": "4.5.2. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.241, + 0.906, + 0.331 + ], + "angle": 0, + "content": "Both EFNet and STCNet are initialized with pre-trained GoPro checkpoints. They fine-tune them separately using the NTIRE official training dataset without additional data, aside from the pre-trained GoPro weights. The patch size is set to \\(1024 \\times 1024\\), and they employ the CosineAnnealingLR scheduler to adjust the learning rate." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.333, + 0.906, + 0.361 + ], + "angle": 0, + "content": "The key differences in the training strategies for EFNet and STCNet are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.364, + 0.906, + 0.529 + ], + "angle": 0, + "content": "For EFNet, they train EFNet for 100k iterations with a batch size of 4 using 4 NVIDIA H800 GPUs. The optimizer is AdamW with an initial learning rate of 2e-4. They generate the event voxel grid following the official script, setting the bin size to 24. Due to differences in the event encoder's channel size, they extended the pre-trained GoPro checkpoint weights from 6 to 24 bins. The loss function consists of the L1 loss, the Charbonnier loss, and the Sobel loss, with respective weights of 1.0, 0.5, and 0.5. Unlike the official EFNet implementation, they do not apply a mask between the two stages." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.531, + 0.906, + 0.605 + ], + "angle": 0, + "content": "For STCNet, they train it for 1000 epochs with a batch size of 8 using 4 NVIDIA H800 GPUs. The optimizer is Adam with an initial learning rate of 2e-4. They use the official event voxel grid with a bin size of 6. The loss function is the Charbonnier loss." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.62, + 0.588, + 0.633 + ], + "angle": 0, + "content": "4.6. WEI" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.642, + 0.906, + 0.749 + ], + "angle": 0, + "content": "Since REFID [40] is an excellent method of event-based blurry video frame interpolation (VFI), considering the differences in modeling image deblurring and VFI problems, they adapt the REFID structure to fit the image deblurring challenge. As shown in Fig. 5, they develop a Bi-directional Gathered Recurrent Network (BGRN) for event-based image deblurring." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.759, + 0.714, + 0.773 + ], + "angle": 0, + "content": "4.6.1. Network Architecture" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.779, + 0.906, + 0.87 + ], + "angle": 0, + "content": "Following REFID [40], the events within the exposure time \\((t - \\Delta t\\to t + \\Delta t)\\) are represented as a voxel grid \\(V_{t - \\Delta t\\rightarrow t + \\Delta t}\\in \\mathbb{R}^{(M + 1)\\times H\\times W}\\), where \\(M\\) is set to 9.
Furthermore, they divide the voxel \\(V_{t - \\Delta t\\rightarrow t + \\Delta t}\\) into two segments \\(V_{t - \\Delta t\\rightarrow t}\\) and \\(V_{t + \\Delta t\\rightarrow t}\\) to perform forward and backward iterations, respectively." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.871, + 0.906, + 0.901 + ], + "angle": 0, + "content": "The BGRN consists of image and event branches. Only a blurry image \\(B_{t}\\) is fed into the image branch, and the" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.111, + 0.087, + 0.891, + 0.386 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.089, + 0.392, + 0.908, + 0.449 + ], + "angle": 0, + "content": "Figure 5. The architecture of the Bi-directional Gathered Recurrent Network (BGRN), proposed by Team Wei, is designed for event-based image deblurring and serves as an enhanced reconfiguration network for REFID. [40]. \"EVR Block\": event recurrent block [40], \"EGACA\": event-guided adaptive channel attention [40], \"SConv\": stripped convolution, \"TConv\": transposed convolution, \"Bi-Fusion\": bidirectional fusion." + }, + { + "type": "text", + "bbox": [ + 0.093, + 0.474, + 0.485, + 0.779 + ], + "angle": 0, + "content": "network output is the corresponding sharp image \\(\\hat{I}_t\\). Besides, they split the original event branch into a forward recurrent branch and a backward recurrent branch, which respectively and recurrently consumes sub-voxels of forward event voxel \\(V_{t - \\Delta t\\to t}\\) and backward event voxel \\(V_{t + \\Delta t\\rightarrow t}\\) in a gathered way. In each recurrent iteration, the sub-voxel \\(V_{sub}\\in \\mathbb{R}^{2\\times H\\times W}\\) is fed to the event branch, which encodes the event information for the latent frame. To fuse the features obtained from forward and backward recurrent branching, the outputs of both directions are fed into a channel cascade and \\(1\\times 1\\) convolution at each scale (\"Bi-Fusion\" in Fig. 5). Then, they are added element by element with the features of the corresponding scale of the decoder. In addition, to reduce redundancy, they removed the recurrent structure of the decoder section and replaced it with residual blocks. Finally, to make the network learn high-frequency information, the output of the last residual block and the initial features of the blurred image are added element by element, and then the sharp image \\(\\hat{I}_t\\) is obtained through a \\(3\\times 3\\) convolution." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.79, + 0.3, + 0.807 + ], + "angle": 0, + "content": "4.6.2. Implementation details" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.81, + 0.486, + 0.903 + ], + "angle": 0, + "content": "Training strategy. They train BGRN with the HighREV training dataset specified by the organizer with a batch size of 4 for 200k iterations on an NVIDIA GeForce RTX 3090 GPU. They crop the input images and event voxels to \\(256 \\times 256\\) for training and use horizontal and vertical flips for data enhancement. AdamW [29] with an initial learning" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.475, + 0.908, + 0.701 + ], + "angle": 0, + "content": "rate of \\(2 \\times 10^{-4}\\) and a cosine learning rate annealing strategy with \\(1 \\times 10^{-7}\\) as the minimum learning rate are adopted for optimization. They use a PSNR loss [39] as supervision. Ensemble strategy. 
During testing, they found that images prefixed with \"zigzag\" showed a large difference in brightness compared to other normal images. To adapt to this sudden change in brightness, they select images with the prefix \"sternwatz_window\" similar to this scene from the training set. Then, they double their brightness to fine-tune the pre-trained BGRN model for 5k iterations with an initial learning rate of \\(2 \\times 10^{-5}\\). Therefore, the ensemble strategy is applied when testing, i.e., the abnormally bright images (prefixed with \"zigzag\") are processed with the fine-tuned model, and the others are processed with the initial pretrained model." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.709, + 0.638, + 0.724 + ], + "angle": 0, + "content": "4.7.DVS-WHU" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.731, + 0.714, + 0.745 + ], + "angle": 0, + "content": "4.7.1. Network Architecture" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.75, + 0.909, + 0.903 + ], + "angle": 0, + "content": "Positioned at Fig. 6, the proposed Dual Channel Cross-modal Mamba (DCCM) architecture comprises three primary components: two Shallow Feature Extraction (SFE) modules, a series of \\(N\\) dual channel blocks (with \\(N = 20\\) in their experimental configuration), each containing two Residual Dense Blocks (RDB) [57] and two Cross Modal Mamba (CMM) [14] blocks, and a Global Feature Fusion (GFF) module. Initially, both blur image and events (represented in 24-bin voxel grids) are processed through the SFE module for preliminary feature extraction. Subsequently," + } + ], + [ + { + "type": "image", + "bbox": [ + 0.103, + 0.09, + 0.468, + 0.242 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.097, + 0.244, + 0.48, + 0.356 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.092, + 0.367, + 0.482, + 0.383 + ], + "angle": 0, + "content": "Figure 6. Architecture of DCCM, proposed by Team DVS-WHU." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.407, + 0.483, + 0.452 + ], + "angle": 0, + "content": "the dual channel blocks facilitate in-depth feature extraction and cross-modal interaction. Finally, the GFF module synthesizes the ultimate latent sharp image." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.452, + 0.483, + 0.619 + ], + "angle": 0, + "content": "The core concept of their network is to establish a mutual compensatory relationship between the features derived from event data and those from blurred images through a dual-channel framework. Specifically, while event data are often characterized by significant noise, images typically exhibit lower noise levels. The CMM block is employed to incorporate image features into the event data, thereby mitigating the noise present in the events. Conversely, event data are rich in sharp edge information, and the CMM block also facilitates the integration of event features into blurred images, ultimately contributing to the deblurred result." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.625, + 0.303, + 0.64 + ], + "angle": 0, + "content": "4.7.2. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.644, + 0.483, + 0.826 + ], + "angle": 0, + "content": "The network is created with PyTorch and trained on two NVIDIA GeForce RTX 3090 GPUs for 150 epochs with ground-truth-guided L1 norm loss. The training process is composed of two phases. 
During the first phase, they follow the strategy of Cheng et al.[6] and pretrain their DCCM on the mixed dataset including synthetic REDS dataset[35] and semi-synthetic HQF dataset[38] with a learning rate fixed at \\(1 \\times 10^{-4}\\) for 50 epochs. In the second phase, the network is fine-tuned on the HighREV dataset[40] where the images are randomly cropped into \\(256 \\times 256\\) patches with horizontal flipping for data augmentation and the learning rate linearly decays to \\(1 \\times 10^{-5}\\) until the 150th epoch." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.834, + 0.219, + 0.849 + ], + "angle": 0, + "content": "4.8. PixelRevive" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.855, + 0.483, + 0.903 + ], + "angle": 0, + "content": "The model they used was the same as the EFNet[39]. The key to the improved performance of their model lied in the utilization of additional datasets during training and" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.332 + ], + "angle": 0, + "content": "the adoption of larger image sizes in the final fine-tuning phase. They employed a two-stage training strategy. First, they used an Events Simulator called V2E[15] to generate Events from REDS dataset. To generate the dataset, they used timestamp resolution as 0.001, dvs exposure duration as 0.001. The remaining parameters were configured identical to those specified in the V2E paper. They get over 20,000 pairs of events, blur images and sharp images. They trained the model on REDS for 250,000 iters, with gt_size 256, patch size 8. When training on simulated datasets with the HighREV validation set, they observed a paradoxical divergence: while the training PSNR consistently improved, the validation PSNR exhibited a decline. This counterintuitive phenomenon may stem from distributional discrepancies between synthetic data and HighREV characteristics across multiple feature dimensions." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.333, + 0.907, + 0.485 + ], + "angle": 0, + "content": "Then, they finetuned it on HighREV train dataset for 200,000 iters, with gt_size 512, patch size 8. The True-CosineAnnealingLR scheduler was employed in both training phases, configured with a period matching the total training iterations and a minimum learning rate value of 1e-7. After experiments, they found that larger gt_size can improve the PSNR by about 0.5. Experiments showed performance decreases when gt_size exceeds 512 (tested range: 256-608), making 512 the optimal size. Other strategy is same as the EFNet." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.492, + 0.593, + 0.507 + ], + "angle": 0, + "content": "4.9. CHD" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.514, + 0.906, + 0.605 + ], + "angle": 0, + "content": "As illustrated in Fig. 7, team CHD develops an efficient Event-Image Deblurformer Network (EIDFNet) based on the Restormer architecture [55]. To address the computational bottleneck encountered when restoring high-resolution blurry images using event data, they incorporate key design elements from EFNet [39]." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.611, + 0.715, + 0.625 + ], + "angle": 0, + "content": "4.9.1. Network Architecture" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.629, + 0.907, + 0.902 + ], + "angle": 0, + "content": "Considering the speed of model training, they still used the official 6-channel voxel grid event representation to achieve a balance between efficiency and precision. 
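Voxel-grid event representations such as the 6-channel grid just mentioned (or the 24-bin grids used by other teams) share the same basic construction; the following NumPy sketch illustrates the idea and is not the official script:

    import numpy as np

    def events_to_voxel_grid(events, num_bins, height, width):
        # events: (N, 4) array of (t, x, y, polarity) with polarity in {-1, +1}.
        t = events[:, 0]
        x, y = events[:, 1].astype(int), events[:, 2].astype(int)
        p = events[:, 3]
        # Normalize timestamps to [0, num_bins - 1] and vote into the nearest bin.
        t = (t - t.min()) / max(t.max() - t.min(), 1e-9) * (num_bins - 1)
        voxel = np.zeros((num_bins, height, width), dtype=np.float32)
        np.add.at(voxel, (np.round(t).astype(int), y, x), p)
        return voxel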
They input the blurred image and the event representation with consistent spatial resolution into the network and employ the modified Transformer Block to fuse the cross-modal feature. Firstly, they modify the transformer block in Restormer [55] as a fusion module to achieve full interaction between different feature channels by setting the number of input and output dims in the GDFN and adding \\(1 \\times 1\\) convolution in the residual connections. Additionally, they build a mutually enhanced fusion encoder based on the Event-Image CrossModal Attention Fusion Module (EICA) proposed in EFNet [39]. The enhanced image features are obtained using K and V derived from event embeddings, while Q is sourced from image embeddings. Conversely, the enhanced event features are generated with K and V originating from image embeddings, with Q being drawn from event embeddings." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.092, + 0.089, + 0.478, + 0.427 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.438, + 0.483, + 0.467 + ], + "angle": 0, + "content": "Figure 7. The framework of Event-Image Deblurformer Network (EIDFNet), proposed by Team CHD." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.509, + 0.483, + 0.676 + ], + "angle": 0, + "content": "In order to achieve comprehensive integration of event and image features, the enhanced image features and enhanced event features are concatenated along the channel dimension. Subsequently, these concatenated features are fused using a Modified Transformer Block. Ultimately, each encoder produces enhanced image features, enhanced event features, and fused features. The enhanced event and image features undergo downsampling before being input into the subsequent encoder. The fusion feature is directly linked to the corresponding decoding feature through a skip connection." + }, + { + "type": "title", + "bbox": [ + 0.09, + 0.708, + 0.262, + 0.723 + ], + "angle": 0, + "content": "4.9.2. Training Strategy" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.735, + 0.483, + 0.903 + ], + "angle": 0, + "content": "They perform a progressive learning strategy following the settings in Restormer [55] and train the model on an A100 GPU with L1 loss. The network is trained on smaller image patches in the early epochs and on gradually larger patches in the later training epochs. During the training process, the batch sizes are [4,3,2,2,1,1], the patch sizes are [128,160,192,256,320,384], and the iterations are [92000,64000,48000,36000,36000,24000]. They employ the AdamW optimizer with an initial learning rate of 3e-4 that follows a CosineAnnealingRestartCyclicLR decay strategy." + }, + { + "type": "image", + "bbox": [ + 0.523, + 0.095, + 0.898, + 0.291 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.529, + 0.307, + 0.89, + 0.322 + ], + "angle": 0, + "content": "Figure 8. Overview of the proposed pipeline by Team SMU." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.348, + 0.6, + 0.362 + ], + "angle": 0, + "content": "4.10. SMU" + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.371, + 0.645, + 0.384 + ], + "angle": 0, + "content": "4.10.1.
Motivation" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.389, + 0.905, + 0.585 + ], + "angle": 0, + "content": "Inspired by recent successes in cross-knowledge sharing between events and RGB frames [39], hierarchical temporal and frequency modelling [18, 40] and stage-wise fine-fusion [20] for the task of event-based RGB deblurring, they propose to modify the base EFNet model [39] such that the modified model serves as a unified framework which (1) iteratively fine-tunes the coarser deblurred images through two stages of extensive fine-fusion to combat the insufficiencies of the existing decoding techniques while (2) can optionally be made to be specifically aware of propagated frequency information in latent representations to locally and globally filter the blur features in the RGB images through leveraging event features in the frequency domain." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.587, + 0.906, + 0.782 + ], + "angle": 0, + "content": "In addition, to the best knowledge, none of the existing methods for event-based RGB deblurring recognizes the importance of feature tracking in this task which can be beneficial especially in challenging conditions such as high contrast (i.e. very bright or dark surroundings) and fast motion (i.e., large pixel displacements within an accumulated event volume) scenarios [33] towards robust performance. To address this limitation, they explicitly employ a data-driven feature tracking module in the pipeline, an inline feature tracker block, such that event feature tracks corresponding to different points in the reference RGB frame are intuitively incorporated in the learning process specifically in the initial stages of the unified framework." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.792, + 0.722, + 0.805 + ], + "angle": 0, + "content": "4.10.2. Network Architecture" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.811, + 0.906, + 0.9 + ], + "angle": 0, + "content": "As depicted in Fig. 8, they propose three main modifications: the inline feature tracker module, bidirectional frame fusion and AdaRevD refinement, to the original EFNet, backed by the motivation as described in section 4.10.1 and validated through the experiments. To this end, they design the inline feature tracker such that the latent RGB and event" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.242 + ], + "angle": 0, + "content": "features are merged and learned through a flow autoencoder block in combination with a Conv-LSTM block to retrieve the temporal alignment of features. Furthermore, it is to be noted that they place the tracker at an initial stage of the pipeline to ensure that the tracker has the access to the high-level features of each modality, rather than the deeper low-level features, since high-level features, which are close to the input data, are more promising to contain information on temporal propagation, which is critical for co-aligned feature tracking." 
+ }, + { + "type": "text", + "bbox": [ + 0.09, + 0.243, + 0.483, + 0.469 + ], + "angle": 0, + "content": "Inspired by [20], they design the first stage of refinement using a bidirectional frame fusion block, specifically targeting the spatiotemporal information flow between adjacent coarse frames while in the second stage of refinement, they further refine the output from the first refinement stage with an objective to identify the still remaining degradation patterns in the RGB space and tackle them using an adaptive patch exiting reversible decoder module [31]. Optionally, to implement the frequency-based filtering of blur features, they follow the cross-modal frequency (CMF) module proposed by [18] such that latent representations at each level of the first U-Net are passed through CMF modules, and concatenated in the decoder levels, in a hierarchical fashion to enhance the latent feature representations with frequency-aware characteristics." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.475, + 0.309, + 0.489 + ], + "angle": 0, + "content": "4.10.3. Implementation Details" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.493, + 0.482, + 0.584 + ], + "angle": 0, + "content": "They train the models using one NVIDIA 3090 GPU machine in two stages: (1) primary event-RGB fusion pipeline including the proposed frequency-aware module, explicit feature tracking and the first iteration of refinement based on the bidirectional frame fusion block and (2) second iteration of refinement based on AdaRevD framework [31]." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.585, + 0.482, + 0.66 + ], + "angle": 0, + "content": "By following the baseline implementation [39], they train the models on the HighREV dataset, in both stages, with an initial learning rate of \\(2 \\times 10^{-4}\\) for a total of \\(2 \\times 10^{4}\\) iterations. The utilized optimizer is AdamW [29] and the learning objective is set to be PSNR loss [39]." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.667, + 0.199, + 0.683 + ], + "angle": 0, + "content": "4.11.JNU620" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.689, + 0.483, + 0.901 + ], + "angle": 0, + "content": "As shown in Fig. 9, their framework adopts EFNet [39] as the baseline architecture. To enchance frequency-aware feature processing, a selection frequency block (SF Block) [9] is integrated following each decoder. The architecture introduces two key components: 1) A multi-branch dynamic selection frequency (MDSF) module that adaptively decouples feature mappings into distinct frequency components through dynamic convolution operations; 2) A multi-branch compact selection frequency (MCSF) module specifically designed to expand the receptive field for processing degraded blurry images. Multiple data augmentation strategies were employed, including horizontal and vertical shiftings. For data preparation, they implemented multiple augmentation strategies including horizontal and vertical spa" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.905, + 0.243 + ], + "angle": 0, + "content": "tial shifts. The model was trained for 120,000 iterations on an NVIDIA GeForce RTX 3090 GPU with a batch size of 4. The models were optimized by the Adam method with \\(\\beta_{1} = 0.9\\) and \\(\\beta_{2} = 0.99\\) and the weight decay was set to \\(10^{-4}\\). The initial learning rate was set to \\(2 \\times 10^{-4}\\), gradually decreased following a cosine annealing schedule. 
In the inference phase, each test image undergoes augmentation through horizontal and vertical flips before being fed into the model. The final restored image is generated by averaging all augmented outputs."
+ },
+ {
+ "type": "title",
+ "bbox": [
+ 0.513,
+ 0.255,
+ 0.602,
+ 0.27
+ ],
+ "angle": 0,
+ "content": "4.12. colab"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.512,
+ 0.279,
+ 0.905,
+ 0.49
+ ],
+ "angle": 0,
+ "content": "Our team proposes an improved method based on EFNet, named DEFNet (Dynamic Enhanced Fusion Network). This method incorporates three key enhancements. First, we introduce a multi-scale dynamic fusion module, which fuses event and image features at multiple spatial resolutions, significantly improving the restoration of fine details in blurred areas [17]. Second, we enhance the original EICA module by integrating a bidirectional attention mechanism, enabling more effective mutual guidance and interaction between image and event features. Third, for processing event data, we adopt a weighted interpolation strategy [40] that models the dynamic weighting of event sequences more accurately, thereby enriching the temporal details provided to the image restoration process."
+ },
+ {
+ "type": "title",
+ "bbox": [
+ 0.513,
+ 0.501,
+ 0.628,
+ 0.515
+ ],
+ "angle": 0,
+ "content": "4.12.1. Network"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.512,
+ 0.521,
+ 0.905,
+ 0.627
+ ],
+ "angle": 0,
+ "content": "Fig. 10 presents the architecture of DEFNet, which is built upon EFNet and incorporates the newly introduced modules: the multi-scale dynamic fusion module and the enhanced EICA module with a bidirectional attention mechanism. These components work collaboratively to optimize the motion deblurring process by improving feature representation and fusion between the image and event data."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.512,
+ 0.629,
+ 0.906,
+ 0.84
+ ],
+ "angle": 0,
+ "content": "During the deblurring process, event streams are used to provide fine-grained temporal variation information that guides the restoration of motion blur in image frames. Specifically, the Symmetric Cumulative Event Representation (SCER) encodes the temporal distribution of events while the enhanced Event-Image Cross-modal Attention Fusion (EICA) module leverages bidirectional attention to facilitate deeper interaction between modalities. Additionally, the integration of weighted interpolation improves the temporal alignment and accuracy of event feature extraction. Together, these components enable DEFNet to more effectively restore motion-blurred images by enhancing edge sharpness, preserving texture, and capturing motion dynamics with higher fidelity."
+ },
+ {
+ "type": "title",
+ "bbox": [
+ 0.513,
+ 0.851,
+ 0.733,
+ 0.866
+ ],
+ "angle": 0,
+ "content": "4.12.2. Implementation Details"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.512,
+ 0.871,
+ 0.905,
+ 0.901
+ ],
+ "angle": 0,
+ "content": "We use the AdamW optimizer with an initial learning rate of 2e-4, weight decay of 1e-4, and betas set to [0.9, 0.99]."
+ }
+ ],
+ [
+ {
+ "type": "image",
+ "bbox": [
+ 0.179,
+ 0.094,
+ 0.817,
+ 0.306
+ ],
+ "angle": 0,
+ "content": null
+ },
+ {
+ "type": "image_caption",
+ "bbox": [
+ 0.318,
+ 0.319,
+ 0.68,
+ 0.334
+ ],
+ "angle": 0,
+ "content": "Figure 9. The model framework proposed by Team JNU620."
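The flip-based self-ensemble described at the start of this section can be sketched as follows; the combined horizontal+vertical flip variant and the stand-in model are assumptions.

```python
import torch

@torch.no_grad()
def flip_ensemble(model, x):
    """Average predictions over flip variants of x, undoing each flip."""
    outs = []
    for dims in ([], [-1], [-2], [-1, -2]):  # identity, h-flip, v-flip, both
        y = model(torch.flip(x, dims)) if dims else model(x)
        outs.append(torch.flip(y, dims) if dims else y)
    return torch.stack(outs).mean(dim=0)

restored = flip_ensemble(torch.nn.Identity(), torch.randn(1, 3, 16, 16))
print(restored.shape)  # torch.Size([1, 3, 16, 16])
```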
+ },
+ {
+ "type": "image",
+ "bbox": [
+ 0.156,
+ 0.358,
+ 0.416,
+ 0.675
+ ],
+ "angle": 0,
+ "content": null
+ },
+ {
+ "type": "image_caption",
+ "bbox": [
+ 0.114,
+ 0.69,
+ 0.46,
+ 0.705
+ ],
+ "angle": 0,
+ "content": "Figure 10. DEFNet architecture, proposed by Team colab."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.09,
+ 0.735,
+ 0.484,
+ 0.901
+ ],
+ "angle": 0,
+ "content": "To dynamically adjust the learning rate, we used the TrueCosineAnnealingLR scheduler with a maximum iteration count of T_max = 200000 and a minimum learning rate of 1e-7. During training, the batch size was set to 4, and 3 worker threads were used per GPU. The total number of training iterations was set to 40000. This method was trained and validated on the HighREV dataset. The model achieved significant improvements on both the training and validation sets, with PSNR and SSIM used as evaluation metrics during training. Validation was performed every 10,000 iterations, and the model was regularly saved."
+ },
+ {
+ "type": "title",
+ "bbox": [
+ 0.513,
+ 0.36,
+ 0.611,
+ 0.374
+ ],
+ "angle": 0,
+ "content": "4.13. CMSL"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.512,
+ 0.382,
+ 0.906,
+ 0.442
+ ],
+ "angle": 0,
+ "content": "The Cascade Event Deblurring Model With Event Edge Loss was built based on EFNet [39]. A motion edge loss and a cascade framework were introduced to enhance the performance of EFNet."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.512,
+ 0.443,
+ 0.907,
+ 0.655
+ ],
+ "angle": 0,
+ "content": "The EFNet backbone was adopted and two improvements were proposed. Firstly, the event data were organized and represented as voxels [39]. Then, the two frames of the event voxels closest to the center of the exposure time were multiplied to produce a motion edge frame. The motion edge frame contains the edges of the moving objects in the current frame, as shown in Fig. 11; Fig. 12 shows the corresponding edges of the ground truth (sharp) image. As shown in Fig. 11 and Fig. 12, the motion edge contains clear lines that are consistent with the true edges and can serve as guiding information for image deblurring. The edges of the deblurred image output by the model should be similar to the motion edge. Therefore, a motion edge loss was proposed as follows:"
+ },
+ {
+ "type": "equation",
+ "bbox": [
+ 0.612,
+ 0.67,
+ 0.808,
+ 0.688
+ ],
+ "angle": 0,
+ "content": "\[\n\ell_{\mathrm{edge}} = \operatorname{mse}(\mathrm{edge}(\widehat{x}) \cdot m, e)\n\]"
+ },
+ {
+ "type": "equation",
+ "bbox": [
+ 0.597,
+ 0.696,
+ 0.824,
+ 0.712
+ ],
+ "angle": 0,
+ "content": "\[\nm_{i, j} = 1 \quad \text{if} \quad e_{i, j} > \tau, \quad \text{else} \quad 0\n\]"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.512,
+ 0.72,
+ 0.906,
+ 0.78
+ ],
+ "angle": 0,
+ "content": "where \(\mathrm{mse(A,B)}\) is the mean squared error between the elements of matrices A and B, \(\widehat{x}\) is the output deblurred image, e is the motion edge frame, m is the motion edge mask, and \(\tau\) is the threshold parameter."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.512,
+ 0.781,
+ 0.907,
+ 0.902
+ ],
+ "angle": 0,
+ "content": "Secondly, a cascade framework was proposed in which two EFNets were connected in cascade to further enhance the image deblurring ability. The first EFNet took the four frames of the event voxels that were relatively remote from the center of the exposure time, while the second EFNet took the two frames of the event voxels that were relatively close to the center of the exposure time.
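A sketch of the motion edge loss defined by the two equations above; the choice of a Sobel operator as the edge extractor and the threshold value are assumptions, not the team's implementation.

```python
import torch
import torch.nn.functional as F

def edge_map(img):
    """Sobel gradient magnitude of a (B, C, H, W) image, as an assumed edge operator."""
    gray = img.mean(dim=1, keepdim=True)
    kx = torch.tensor([[-1., 0., 1.], [-2., 0., 2.], [-1., 0., 1.]]).view(1, 1, 3, 3)
    ky = kx.transpose(-1, -2)
    gx = F.conv2d(gray, kx, padding=1)
    gy = F.conv2d(gray, ky, padding=1)
    return torch.sqrt(gx ** 2 + gy ** 2 + 1e-8)

def motion_edge_loss(x_hat, e, tau=0.1):
    m = (e > tau).float()                    # m_ij = 1 if e_ij > tau else 0
    return F.mse_loss(edge_map(x_hat) * m, e)

x_hat = torch.rand(2, 3, 64, 64)             # deblurred prediction
e = torch.rand(2, 1, 64, 64)                 # motion edge frame from event voxels
print(motion_edge_loss(x_hat, e).item())
```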
The two EFNets form a coarse-to-fine paradigm that gradually removes the motion blur."
+ }
+ ],
+ [
+ {
+ "type": "image",
+ "bbox": [
+ 0.101,
+ 0.094,
+ 0.476,
+ 0.314
+ ],
+ "angle": 0,
+ "content": null
+ },
+ {
+ "type": "image_caption",
+ "bbox": [
+ 0.14,
+ 0.329,
+ 0.434,
+ 0.344
+ ],
+ "angle": 0,
+ "content": "Figure 11. The visualization of the motion edges."
+ },
+ {
+ "type": "image",
+ "bbox": [
+ 0.101,
+ 0.367,
+ 0.476,
+ 0.585
+ ],
+ "angle": 0,
+ "content": null
+ },
+ {
+ "type": "image_caption",
+ "bbox": [
+ 0.146,
+ 0.601,
+ 0.428,
+ 0.615
+ ],
+ "angle": 0,
+ "content": "Figure 12. The edges in the ground truth frame."
+ },
+ {
+ "type": "title",
+ "bbox": [
+ 0.091,
+ 0.644,
+ 0.188,
+ 0.658
+ ],
+ "angle": 0,
+ "content": "4.14. KUnet"
+ },
+ {
+ "type": "title",
+ "bbox": [
+ 0.091,
+ 0.668,
+ 0.234,
+ 0.682
+ ],
+ "angle": 0,
+ "content": "4.14.1. Architecture"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.09,
+ 0.688,
+ 0.483,
+ 0.793
+ ],
+ "angle": 0,
+ "content": "Their solution is built upon a custom KUnet backbone tailored for event-based image deblurring. The model employs a dual-encoder strategy that separately processes RGB images and voxelized event data, each through a dedicated encoder branch. At the bottleneck, the features are fused via channel-wise concatenation and passed through a transformer module."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.09,
+ 0.796,
+ 0.483,
+ 0.903
+ ],
+ "angle": 0,
+ "content": "A key novelty in the design is the use of KANLinear layers within the transformer block. These layers, based on spline-interpolated kernels, improve attention expressiveness without adding significant computational overhead. This fusion architecture leverages the temporal sharpness of events together with the spatial-semantic richness of RGB images to produce high-fidelity deblurred outputs."
+ },
+ {
+ "type": "image",
+ "bbox": [
+ 0.534,
+ 0.089,
+ 0.709,
+ 0.193
+ ],
+ "angle": 0,
+ "content": null
+ },
+ {
+ "type": "image",
+ "bbox": [
+ 0.712,
+ 0.09,
+ 0.887,
+ 0.192
+ ],
+ "angle": 0,
+ "content": null
+ },
+ {
+ "type": "image_caption",
+ "bbox": [
+ 0.513,
+ 0.202,
+ 0.906,
+ 0.23
+ ],
+ "angle": 0,
+ "content": "Figure 13. Left: Input blurry frame. Right: output of KUnet, with detailed texture."
+ },
+ {
+ "type": "title",
+ "bbox": [
+ 0.513,
+ 0.258,
+ 0.733,
+ 0.273
+ ],
+ "angle": 0,
+ "content": "4.14.2. Implementation Details"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.512,
+ 0.277,
+ 0.906,
+ 0.351
+ ],
+ "angle": 0,
+ "content": "They train the model from scratch on the official NTIRE 2025 HighREV dataset without any external data or pretrained weights. The voxelized events are represented using 6 temporal bins, generating a 6-channel input tensor for the event encoder."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.512,
+ 0.353,
+ 0.906,
+ 0.444
+ ],
+ "angle": 0,
+ "content": "Training was conducted using 2 NVIDIA A100 GPUs with a batch size of 8 and a patch size of \(256 \times 256\). They trained the network for 150k iterations using the AdamW optimizer (\(\beta_{1} = 0.9\), \(\beta_{2} = 0.99\), weight decay = 1e-4) and a CosineAnnealingLR scheduler. Data augmentations included random horizontal flips and rotations."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.513,
+ 0.444,
+ 0.905,
+ 0.489
+ ],
+ "angle": 0,
+ "content": "The loss function includes a PSNR loss weighted at 0.5. Their final checkpoint achieved a peak PSNR of 29.42 on the NTIRE 2025 validation phase."
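The 6-bin event voxelization mentioned above could look roughly like this; nearest-bin assignment (rather than bilinear temporal weighting) and the resolution are assumptions.

```python
import torch

def events_to_voxel(x, y, t, p, bins=6, H=256, W=256):
    """Accumulate events (x, y, t, polarity) into a (bins, H, W) voxel grid."""
    vox = torch.zeros(bins, H, W)
    tn = (t - t.min()) / (t.max() - t.min() + 1e-9)  # normalize timestamps to [0, 1]
    b = (tn * (bins - 1)).round().long()             # nearest temporal bin per event
    idx = b * H * W + y.long() * W + x.long()        # flat voxel index per event
    vox.view(-1).index_add_(0, idx, p.float())       # accumulate signed polarity
    return vox

n = 10_000
x = torch.randint(0, 256, (n,)); y = torch.randint(0, 256, (n,))
t = torch.rand(n).sort().values; p = torch.randint(0, 2, (n,)) * 2 - 1
print(events_to_voxel(x, y, t, p).shape)  # torch.Size([6, 256, 256])
```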
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.49, + 0.906, + 0.566 + ], + "angle": 0, + "content": "Inference was performed using a sliding window approach with a max minibatch size of 8. They observed an inference time of \\(\\sim 0.15\\) seconds per frame on an A100 GPU, and a memory footprint of approximately 16 GB during training." + }, + { + "type": "title", + "bbox": [ + 0.534, + 0.566, + 0.672, + 0.58 + ], + "angle": 0, + "content": "Model Complexity:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.581, + 0.65, + 0.594 + ], + "angle": 0, + "content": "Parameters: 11M" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.596, + 0.681, + 0.61 + ], + "angle": 0, + "content": "FLOPs: Not computed" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.611, + 0.793, + 0.626 + ], + "angle": 0, + "content": "- GPU Memory Usage: 16 GB (training)" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.627, + 0.724, + 0.639 + ], + "angle": 0, + "content": "Inference Time: 0.15s/frame" + }, + { + "type": "list", + "bbox": [ + 0.515, + 0.581, + 0.793, + 0.639 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.534, + 0.642, + 0.685, + 0.655 + ], + "angle": 0, + "content": "Code and Resources:" + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.657, + 0.906, + 0.687 + ], + "angle": 0, + "content": "- GitHub: https://github.com/Splendor73/NTIRE2025_EventDeblur_challenge_asu" + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.688, + 0.906, + 0.717 + ], + "angle": 0, + "content": "- Pretrained: https://www.dropbox.com/scl/fi/19td2xtbzxed2bg8tc9w0/17_KUnet.zip" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.718, + 0.906, + 0.749 + ], + "angle": 0, + "content": "- Results: https://www.dropbox.com/scl/fi/yrky29x2mdwt3k8e40yol/Results.zip" + }, + { + "type": "list", + "bbox": [ + 0.515, + 0.657, + 0.906, + 0.749 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.757, + 0.63, + 0.774 + ], + "angle": 0, + "content": "4.15. Group10" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.78, + 0.906, + 0.856 + ], + "angle": 0, + "content": "The solution is built upon a custom adaptation of the EFNet deblurring framework[39]. The method strategically harnesses both conventional image data and event-based information to mitigate motion blur effectively. Key components of the approach include:" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.856, + 0.906, + 0.902 + ], + "angle": 0, + "content": "Dual-Stream Network Architecture: The model consists of parallel convolutional streams. One stream processes the blurry input image, while the other processes event data," + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.152 + ], + "angle": 0, + "content": "which is converted into a voxel grid representation. A cross-modal attention module subsequently fuses the features extracted from both modalities, enhancing the network's ability to recover fine details in dynamic scenes." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.153, + 0.482, + 0.244 + ], + "angle": 0, + "content": "Event Data Representation: The raw event data - comprising spatial coordinates, timestamps, and polarity - is transformed into a voxel grid. This process involves temporal normalization and spatial mapping, enabling the network to capture the dynamic nature of motion events with high precision." 
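A minimal sketch of a cross-modal attention fusion block in the spirit of the dual-stream design described above; single-head attention, the channel size, and the residual connection are assumptions, not the team's exact module.

```python
import torch
import torch.nn as nn

class CrossModalAttention(nn.Module):
    def __init__(self, ch=64):
        super().__init__()
        self.q = nn.Conv2d(ch, ch, 1)  # queries from the image stream
        self.k = nn.Conv2d(ch, ch, 1)  # keys from the event stream
        self.v = nn.Conv2d(ch, ch, 1)  # values from the event stream
        self.scale = ch ** -0.5

    def forward(self, img_feat, evt_feat):
        b, c, h, w = img_feat.shape
        q = self.q(img_feat).flatten(2).transpose(1, 2)   # (B, HW, C)
        k = self.k(evt_feat).flatten(2)                   # (B, C, HW)
        v = self.v(evt_feat).flatten(2).transpose(1, 2)   # (B, HW, C)
        attn = torch.softmax(q @ k * self.scale, dim=-1)  # (B, HW, HW)
        out = (attn @ v).transpose(1, 2).view(b, c, h, w)
        return img_feat + out  # residual fusion of event cues into image features

f_img, f_evt = torch.randn(1, 64, 16, 16), torch.randn(1, 64, 16, 16)
print(CrossModalAttention()(f_img, f_evt).shape)  # torch.Size([1, 64, 16, 16])
```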
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.09,
+ 0.245,
+ 0.482,
+ 0.35
+ ],
+ "angle": 0,
+ "content": "Training Strategy: Mixed precision training is utilized to maximize GPU efficiency and accelerate convergence. Gradient accumulation is employed to effectively simulate a larger batch size, which is critical for stable training on high-resolution data. The training loss is computed using the Mean Squared Error (MSE) criterion, guiding the network to produce high-quality deblurred images."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.09,
+ 0.351,
+ 0.482,
+ 0.44
+ ],
+ "angle": 0,
+ "content": "Data Pipeline: Custom PyTorch Dataset classes handle the loading and preprocessing of both image and event data. The pipeline includes resizing, normalization, and careful synchronization between blurry images and their corresponding event data, ensuring data consistency across modalities."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.09,
+ 0.443,
+ 0.482,
+ 0.518
+ ],
+ "angle": 0,
+ "content": "Performance Evaluation: The evaluation strategy employs widely accepted metrics such as PSNR and SSIM to quantify restoration quality. Test outputs are resized to their original dimensions and saved as lossless PNG images to preserve the fidelity of the results."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.11,
+ 0.52,
+ 0.289,
+ 0.533
+ ],
+ "angle": 0,
+ "content": "Additional details include:"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.09,
+ 0.535,
+ 0.483,
+ 0.594
+ ],
+ "angle": 0,
+ "content": "Parameter Count: The EnhancedEFNet model consists of convolutional layers, CrossModalAttention blocks, and skip connections, leading to a parameter count in the range of millions."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.09,
+ 0.597,
+ 0.482,
+ 0.657
+ ],
+ "angle": 0,
+ "content": "CrossModalAttention layers: These layers introduce additional tensor operations and memory usage. No external pre-trained models were directly used in training. The architecture was trained from scratch on the provided dataset."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.09,
+ 0.658,
+ 0.482,
+ 0.703
+ ],
+ "angle": 0,
+ "content": "GPU Memory Usage: Memory usage is influenced by the batch size (default 4 per GPU) and the voxel grid representation (6 event bins, which increases the input size)."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.09,
+ 0.704,
+ 0.482,
+ 0.78
+ ],
+ "angle": 0,
+ "content": "CrossModalAttention: Inspired by self-attention mechanisms in Transformer models. Hybrid Loss Function: Combines MSE and L1 loss for better generalization. CosineAnnealingLR Scheduler: Used to dynamically adjust learning rates during training."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.09,
+ 0.781,
+ 0.482,
+ 0.901
+ ],
+ "angle": 0,
+ "content": "Use of Additional Training Data: Only NTIRE Dataset Used: The training was restricted to the HighREV dataset provided by NTIRE. No additional synthetic or external event-based datasets were incorporated. Potential Future Enhancements: Using real-world event datasets (e.g., DSEC, MVSEC) could improve generalization. Finetuning with pre-trained image restoration models (like DeblurGAN) could be explored."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.512,
+ 0.092,
+ 0.905,
+ 0.257
+ ],
+ "angle": 0,
+ "content": "Quantitative and Qualitative Improvements. Quantitative Improvements (Metrics & Performance): Peak Signal-to-Noise Ratio (PSNR): Achieved PSNR: 25.93. Improved compared to baseline event fusion models. Structural Similarity Index (SSIM): Achieved SSIM: 0.82.
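The mixed-precision and gradient-accumulation strategy described under "Training Strategy" might be implemented roughly as below; the accumulation factor, stand-in model, and dummy data are assumptions.

```python
import torch

model = torch.nn.Conv2d(3, 3, 3, padding=1)  # stand-in for EnhancedEFNet
opt = torch.optim.Adam(model.parameters(), lr=2e-4)
scaler = torch.cuda.amp.GradScaler(enabled=torch.cuda.is_available())
accum = 4  # assumed factor; simulates a 4x larger effective batch

for step in range(8):
    blurry, sharp = torch.randn(2, 3, 32, 32), torch.randn(2, 3, 32, 32)
    with torch.autocast("cuda" if torch.cuda.is_available() else "cpu"):
        loss = torch.nn.functional.mse_loss(model(blurry), sharp) / accum
    scaler.scale(loss).backward()          # gradients accumulate across steps
    if (step + 1) % accum == 0:
        scaler.step(opt)                   # unscale gradients + optimizer update
        scaler.update()
        opt.zero_grad(set_to_none=True)
```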
Indicates better perceptual quality in restored images. Qualitative Improvements (Visual Results & Generalization): Better Detail Recovery: The attention-based fusion of events and images leads to sharper edges and better contrast in reconstructed images. Works well in low-light or high-motion blur scenarios." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.258, + 0.905, + 0.394 + ], + "angle": 0, + "content": "Comparison with Baseline Models: Standard CNN-based deblurring struggles with fine-grained event details, but EnhancedEFNet effectively fuses event features to improve deblurring accuracy. CrossModalAttention aids in spatial alignment of events and images, reducing artifacts. Failure Cases & Future Improvements: Highly blurred images with saturated event data can still cause artifacts. More robust fusion mechanisms (e.g., transformer-based approaches) could further enhance performance." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.408, + 0.673, + 0.424 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.433, + 0.905, + 0.569 + ], + "angle": 0, + "content": "This work was partially supported by the Humboldt Foundation, the Ministry of Education and Science of Bulgaria (support for INSAIT, part of the Bulgarian National Roadmap for Research Infrastructure). Shaolin Su was supported by the HORIZON MSCA Postdoctoral Fellowships funded by the European Union (project number 101152858). We thank the NTIRE 2025 sponsors: ByteDance, Meituan, Kuaishou, and University of Wurzburg (Computer Vision Lab)." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.583, + 0.727, + 0.599 + ], + "angle": 0, + "content": "A. Teams and affiliations" + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.607, + 0.659, + 0.621 + ], + "angle": 0, + "content": "NTIRE 2025 team" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.629, + 0.905, + 0.644 + ], + "angle": 0, + "content": "Title: NTIRE 2025 Event-Based Image Deblurring" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.645, + 0.585, + 0.659 + ], + "angle": 0, + "content": "Challenge" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.661, + 0.586, + 0.673 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.675, + 0.726, + 0.689 + ], + "angle": 0, + "content": "Lei Sun1 (leo.sun@zju.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.69, + 0.821, + 0.704 + ], + "angle": 0, + "content": "Andrea Alfarano1 (andrea.alfarano@insait.ai)," + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.705, + 0.781, + 0.719 + ], + "angle": 0, + "content": "Peiqi Duan2 (duanqi0001@pku.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.72, + 0.751, + 0.735 + ], + "angle": 0, + "content": "Shaolin \\(\\mathrm{Su}^3\\) (shaolin@cvc.uab.cat)," + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.736, + 0.794, + 0.75 + ], + "angle": 0, + "content": "Kaiwei Wang4 (wangkaiwei@zju.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.751, + 0.753, + 0.765 + ], + "angle": 0, + "content": "Boxin Shi\\(^2\\) (shiboxin@pku.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.766, + 0.841, + 0.78 + ], + "angle": 0, + "content": "Radu Timofte\\(^{5}\\) (radu.timofte@uni-wuerzburg.de)" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.781, + 0.821, + 0.795 + ], + "angle": 0, + "content": "Danda Pani Paudel1 (danda.paudel@insait.ai)," + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.796, + 0.812, + 0.81 + ], + "angle": 0, + "content": "Luc 
Van Gool1 (vangool@vision.ee.ethz.ch)," + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.826, + 0.598, + 0.839 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.841, + 0.905, + 0.871 + ], + "angle": 0, + "content": "1 INSAIT, Sofia University \"St. Kliment Ohridski\", Bulgaria" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.872, + 0.694, + 0.886 + ], + "angle": 0, + "content": "2 Peking University, China" + }, + { + "type": "ref_text", + "bbox": [ + 0.516, + 0.887, + 0.734, + 0.901 + ], + "angle": 0, + "content": "3 Computer Vision Center, Spain" + }, + { + "type": "list", + "bbox": [ + 0.516, + 0.841, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "text", + "bbox": [ + 0.091, + 0.091, + 0.285, + 0.106 + ], + "angle": 0, + "content": "4 Zhejiang University, China" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.106, + 0.331, + 0.122 + ], + "angle": 0, + "content": "5 University of Würzburg, Germany" + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.091, + 0.331, + 0.122 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.146, + 0.171, + 0.16 + ], + "angle": 0, + "content": "IVISLAB" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.169, + 0.453, + 0.184 + ], + "angle": 0, + "content": "Title: Triple Event-stream Image Deblurring Network" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.185, + 0.163, + 0.197 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.198, + 0.482, + 0.243 + ], + "angle": 0, + "content": "Qinglin Liu\\(^{1}\\) (qlliu@hit.edu.cn), Wei Yu\\(^{2}\\), Xiaogian Lv\\(^{1}\\), Lu Yang\\(^{3}\\), Shuigen Wang\\(^{3}\\), Shengping Zhang\\(^{1}\\), Xiangyang Ji\\(^{2}\\)" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.245, + 0.175, + 0.259 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.259, + 0.361, + 0.274 + ], + "angle": 0, + "content": "1 Harbin Institute of Technology, Weihai" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.275, + 0.241, + 0.29 + ], + "angle": 0, + "content": "2 Tsinghua University" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.29, + 0.302, + 0.305 + ], + "angle": 0, + "content": "3 Raytron Technology Co., Ltd." 
+ }, + { + "type": "list", + "bbox": [ + 0.092, + 0.259, + 0.361, + 0.305 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.33, + 0.216, + 0.344 + ], + "angle": 0, + "content": "MiVideoDeblur" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.352, + 0.482, + 0.381 + ], + "angle": 0, + "content": "Title: Event-Based Image Deblurring from Team MiVideoDeblur" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.383, + 0.164, + 0.396 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.397, + 0.482, + 0.427 + ], + "angle": 0, + "content": "Long Bao1 (baolong@xiaomi.com), Yuqiang Yang1, Jinao Song1, Ziyi Wang1, Shuang Wen1, Heng Sun1" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.429, + 0.175, + 0.442 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.443, + 0.232, + 0.457 + ], + "angle": 0, + "content": "\\(^{1}\\) Xiaomi Inc., China" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.482, + 0.2, + 0.497 + ], + "angle": 0, + "content": "404NotFound" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.505, + 0.482, + 0.535 + ], + "angle": 0, + "content": "Title: Event-Conditioned Dual-Modal Fusion for Motion Deblurring" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.537, + 0.163, + 0.549 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.55, + 0.482, + 0.609 + ], + "angle": 0, + "content": "Kean Liu1 (rickyliu@mail.ustc.edu.cn), Mingchen Zhong1, Senyan Xu1, Zhijing Sun1, Jiaying Zhu1, Chengjie Ge1, Xingbo Wang1, Yidi Liu1, Xin Lu1, Xueyang Fu1, Zheng-Jun Zha1" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.611, + 0.175, + 0.625 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.625, + 0.417, + 0.641 + ], + "angle": 0, + "content": "1 University of Science and Technology of China" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.666, + 0.194, + 0.682 + ], + "angle": 0, + "content": "Give_it_a_try" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.687, + 0.482, + 0.718 + ], + "angle": 0, + "content": "Title: Event-Based Image Deblurring from Team Give_it_a_try" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.719, + 0.162, + 0.732 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.733, + 0.482, + 0.763 + ], + "angle": 0, + "content": "Dawei Fan\\(^{1}\\) (dawei.fan@partner.samsung.com), Dafeng Zhang\\(^{1}\\), Yong Yang\\(^{1}\\)" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.764, + 0.175, + 0.778 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.778, + 0.393, + 0.794 + ], + "angle": 0, + "content": "\\(^{1}\\) Samsung Research China- Beijing (SRC-B)" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.818, + 0.178, + 0.832 + ], + "angle": 0, + "content": "BUPTMM" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.84, + 0.48, + 0.869 + ], + "angle": 0, + "content": "Title: Weighted Fusion for Event-based Image Deblurring Members:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.87, + 0.482, + 0.902 + ], + "angle": 0, + "content": "Siru Zhang\\(^{1}\\) (zhangsr@bupt.edu.cn), Qinghua Yang\\(^{1}\\), Hao Kang\\(^{1}\\), Huiyuan Fu\\(^{1}\\), Heng Zhang\\(^{2}\\), Hongyuan Yu\\(^{2}\\)," + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.091, + 0.624, + 0.107 + ], + "angle": 0, + "content": "Zhijuan Huang" + }, + { + "type": "title", + 
"bbox": [ + 0.514, + 0.108, + 0.598, + 0.121 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.122, + 0.904, + 0.152 + ], + "angle": 0, + "content": "1 Beijing University of Posts and Telecommunications, Beijing, China." + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.152, + 0.659, + 0.166 + ], + "angle": 0, + "content": "\\(^{2}\\) Xiaomi Inc., China." + }, + { + "type": "list", + "bbox": [ + 0.514, + 0.122, + 0.904, + 0.166 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.195, + 0.556, + 0.209 + ], + "angle": 0, + "content": "WEI" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.218, + 0.905, + 0.248 + ], + "angle": 0, + "content": "Title: Bi-directional Gathered Recurrent Network for Event-based Image Deblurring" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.25, + 0.586, + 0.262 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.263, + 0.801, + 0.278 + ], + "angle": 0, + "content": "Shuoyan Wei1 (shuoyan.wei@bjtu.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.279, + 0.684, + 0.294 + ], + "angle": 0, + "content": "Feng Li\\(^{2}\\), Runmin Cong\\(^{3}\\)" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.295, + 0.598, + 0.308 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.309, + 0.904, + 0.339 + ], + "angle": 0, + "content": "\\(^{1}\\) Institute of Information Science, Beijing Jiaotong University" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.339, + 0.904, + 0.37 + ], + "angle": 0, + "content": "\\(^{2}\\) School of Computer Science and Engineering, Hefei University of Technology" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.37, + 0.904, + 0.4 + ], + "angle": 0, + "content": "\\(^{3}\\) School of Control Science and Engineering, Shandong University" + }, + { + "type": "list", + "bbox": [ + 0.515, + 0.309, + 0.904, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.428, + 0.605, + 0.442 + ], + "angle": 0, + "content": "DVS-WHU" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.451, + 0.905, + 0.481 + ], + "angle": 0, + "content": "Title: Dual Channel Cross-modal Mamba for Event-based Motion Deblurring" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.482, + 0.586, + 0.494 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.497, + 0.756, + 0.511 + ], + "angle": 0, + "content": "Weiqi Luo1 (wikyluo@whu.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.511, + 0.904, + 0.541 + ], + "angle": 0, + "content": "Mingyun Lin1, Chenxu Jiang1, Hongyi Liu1, Lei Yu2 \nAffiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.542, + 0.872, + 0.556 + ], + "angle": 0, + "content": "\\(^{1}\\) School of Electronic Information, Wuhan University" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.557, + 0.863, + 0.572 + ], + "angle": 0, + "content": "\\(^{2}\\) School of Artificial Intelligence, Wuhan University" + }, + { + "type": "list", + "bbox": [ + 0.515, + 0.542, + 0.872, + 0.572 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.6, + 0.608, + 0.614 + ], + "angle": 0, + "content": "PixelRevive" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.623, + 0.904, + 0.652 + ], + "angle": 0, + "content": "Title: Event-Based Image Deblurring from Team PixelRe-vive" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.654, + 
0.586, + 0.666 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.668, + 0.904, + 0.697 + ], + "angle": 0, + "content": "Weilun Li\\(^{1}\\) (xyj961011@163.com), Jiajun Zhai\\(^{1}\\), Tingting Lin\\(^{1}\\)" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.699, + 0.598, + 0.713 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.714, + 0.905, + 0.744 + ], + "angle": 0, + "content": "1 College of Optical Science and Engineering, Zhejiang University" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.772, + 0.559, + 0.786 + ], + "angle": 0, + "content": "CHD" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.795, + 0.803, + 0.81 + ], + "angle": 0, + "content": "Title: Event-Image Deblurformer Network" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.812, + 0.585, + 0.823 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.825, + 0.904, + 0.856 + ], + "angle": 0, + "content": "Shuang Ma1 (3125508679@qq.com), Sai Zhou2, Zhanwen Liu3, Yang Wang4" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.857, + 0.598, + 0.87 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.871, + 0.757, + 0.886 + ], + "angle": 0, + "content": "1 Chang'an University, Xi'an, China" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.092, + 0.091, + 0.137, + 0.105 + ], + "angle": 0, + "content": "SMU" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.115, + 0.484, + 0.146 + ], + "angle": 0, + "content": "Title: Explicit Feature Tracking and Iterative Refinement for Enhancing Event-based Image Deblurring" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.147, + 0.165, + 0.159 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.16, + 0.482, + 0.191 + ], + "angle": 0, + "content": "Eiffel Chong1, Nuwan Bandara1, Thivya Kandappu1 (thivyak@smu.edu.sg), Archan Misra1" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.192, + 0.175, + 0.205 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.094, + 0.206, + 0.335, + 0.222 + ], + "angle": 0, + "content": "\\(^{1}\\) Singapore Management University" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.253, + 0.16, + 0.269 + ], + "angle": 0, + "content": "JNU620" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.277, + 0.48, + 0.293 + ], + "angle": 0, + "content": "Title: Event-Based Image Deblurring from Team JNU620" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.294, + 0.164, + 0.307 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.308, + 0.356, + 0.323 + ], + "angle": 0, + "content": "Yihang Chen\\(^{1}\\) (Ehang@stu.jnu.edu.cn)," + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.323, + 0.482, + 0.353 + ], + "angle": 0, + "content": "Zhan Li\\(^{1}\\), Weijun Yuan\\(^{1}\\), Wenzhuo Wang\\(^{1}\\), Boyang Yao\\(^{1}\\), Zhanglu Chen\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.354, + 0.176, + 0.368 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.368, + 0.482, + 0.399 + ], + "angle": 0, + "content": "\\(^{1}\\) Department of Computer Science, Jinan University, Guangzhou, China" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.431, + 0.138, + 0.445 + ], + "angle": 0, + "content": "colab" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.455, + 0.483, + 0.486 + ], + 
"angle": 0, + "content": "Title: Dynamic Enhanced Fusion Network for Event-based Image Deblurring" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.487, + 0.164, + 0.499 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.5, + 0.482, + 0.531 + ], + "angle": 0, + "content": "Yijing Sun\\(^{1}\\) (syj3508852939@163.com), Tianjiao Wan\\(^{1}\\), Zijian Gao\\(^{1}\\), Qisheng Xu\\(^{1}\\), Kele Xu\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.532, + 0.176, + 0.545 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.546, + 0.392, + 0.561 + ], + "angle": 0, + "content": "\\(^{1}\\) National University of Defense Technology" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.593, + 0.148, + 0.608 + ], + "angle": 0, + "content": "CMSL" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.617, + 0.483, + 0.646 + ], + "angle": 0, + "content": "Title: Cascade Event Deblurring Model With Event Edge Loss" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.649, + 0.164, + 0.661 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.663, + 0.482, + 0.693 + ], + "angle": 0, + "content": "Yukun Zhang\\(^{1}\\) (zhangyukun@cmhi.chinamobile.com), Yu He\\(^{1}\\), Xiaoyan Xie\\(^{1}\\), Tao Fu\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.694, + 0.176, + 0.707 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.708, + 0.482, + 0.739 + ], + "angle": 0, + "content": "1 China Mobile (Hangzhou) Information Technology Co., Ltd, Hangzhou, China" + }, + { + "type": "title", + "bbox": [ + 0.092, + 0.77, + 0.148, + 0.785 + ], + "angle": 0, + "content": "KUnet" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.795, + 0.175, + 0.809 + ], + "angle": 0, + "content": "Title KUnet" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.811, + 0.164, + 0.824 + ], + "angle": 0, + "content": "Members:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.825, + 0.483, + 0.855 + ], + "angle": 0, + "content": "Yashu Gautamkumar Patel1 (ypatel37@asu.edu), Vihar Ramesh Jain1, Divesh Basina1" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.857, + 0.175, + 0.87 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.092, + 0.871, + 0.269, + 0.887 + ], + "angle": 0, + "content": "1 Arizona State University" + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.091, + 0.589, + 0.107 + ], + "angle": 0, + "content": "Group10" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.113, + 0.907, + 0.142 + ], + "angle": 0, + "content": "Title: Event-Based Image Deblurring from Team Group10 Members:" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.143, + 0.906, + 0.203 + ], + "angle": 0, + "content": "Rishik Ashili\\(^{1}\\) (rishik67_soe@jnu.ac.in), Manish Kumar Manjhi\\(^{1}\\), Sourav Kumar\\(^{1}\\), Prinon Benny\\(^{1}\\), Himanshu Ghunawat\\(^{1}\\), B Sri Sairam Gautam\\(^{1}\\), Anett Varghese\\(^{1}\\), Abhishek Yadav\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.205, + 0.599, + 0.219 + ], + "angle": 0, + "content": "Affiliations:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.219, + 0.836, + 0.234 + ], + "angle": 0, + "content": "1 Jawaharlal Nehru University, New Delhi, India" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.264, + 0.611, + 0.28 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.289, + 
0.908, + 0.346 + ], + "angle": 0, + "content": "[1] Inigo Alonso and Ana C Murillo. Ev-segnet: Semantic segmentation for event-based cameras. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 0-0, 2019. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.347, + 0.906, + 0.403 + ], + "angle": 0, + "content": "[2] Jiaan Chen, Hao Shi, Yaozu Ye, Kailun Yang, Lei Sun, and Kaiwei Wang. Efficient human pose estimation via 3d event point cloud. In 2022 International Conference on 3D Vision (3DV), pages 1-10. IEEE, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.404, + 0.906, + 0.457 + ], + "angle": 0, + "content": "[3] Liangyu Chen, Xiaojie Chu, Xiangyu Zhang, and Jian Sun. Simple baselines for image restoration. In European Conference on Computer Vision, pages 17-33. Springer, 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.461, + 0.906, + 0.544 + ], + "angle": 0, + "content": "[4] Zheng Chen, Kai Liu, Jue Gong, Jingkai Wang, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. NTIRE 2025 challenge on image super-resolution \\((\\times 4)\\): Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.546, + 0.906, + 0.629 + ], + "angle": 0, + "content": "[5] Zheng Chen, Jingkai Wang, Kai Liu, Jue Gong, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. NTIRE 2025 challenge on real-world face restoration: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.631, + 0.906, + 0.686 + ], + "angle": 0, + "content": "[6] Zhangyi Cheng, Xiang Zhang, Lei Yu, Jianzhuang Liu, Wen Yang, and Gui-Song Xia. Recovering continuous scene dynamics from a single blurry image with events. arXiv preprint arXiv:2304.02695, 2023. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.688, + 0.906, + 0.743 + ], + "angle": 0, + "content": "[7] Marcos Conde, Radu Timofte, et al. NTIRE 2025 challenge on raw image restoration and super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.746, + 0.906, + 0.813 + ], + "angle": 0, + "content": "[8] Marcos Conde, Radu Timofte, et al. Raw image reconstruction from RGB on smartphones. NTIRE 2025 challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.816, + 0.906, + 0.871 + ], + "angle": 0, + "content": "[9] Yuning Cui, Yi Tao, Zhenshan Bing, Wenqi Ren, Xinwei Gao, Xiaochun Cao, Kai Huang, and Alois Knoll. Selective frequency network for image restoration. In International Conference on Learning Representations, 2023. 
10" + }, + { + "type": "ref_text", + "bbox": [ + 0.515, + 0.873, + 0.906, + 0.902 + ], + "angle": 0, + "content": "[10] Egor Ershov, Sergey Korchagin, Alexei Khalin, Artyom Panshin, Arseniy Terekhin, Ekaterina Zaychenkova, Georgiy" + }, + { + "type": "list", + "bbox": [ + 0.515, + 0.289, + 0.908, + 0.902 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.125, + 0.092, + 0.483, + 0.176 + ], + "angle": 0, + "content": "Lobarev, Vsevolod Plokhotnyuk, Denis Abramov, Elisey Zhdanov, Sofia Dorogova, Yasin Mamedov, Nikola Banic, Georgii Perevozchikov, Radu Timofte, et al. NTIRE 2025 challenge on night photography rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.178, + 0.483, + 0.26 + ], + "angle": 0, + "content": "[11] Yuqian Fu, Xingyu Qiu, Bin Ren Yanwei Fu, Radu Timofte, Nicu Sebe, Ming-Hsuan Yang, Luc Van Gool, et al. NTIRE 2025 challenge on cross-domain few-shot object detection: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.263, + 0.483, + 0.331 + ], + "angle": 0, + "content": "[12] Guillermo Gallego, Tobi Delbruck, Garrick Orchard, Chiara Bartolozzi, Brian Taba, Andrea Censi, Stefan Leutenegger, Andrew Davison, Jörg Conradt, Kostas Daniilidis, and Davide Scaramuzza. Event-based vision: A survey. IEEE Trans. Pattern Anal. Mach. Intell., 44(1):154-180, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.333, + 0.483, + 0.415 + ], + "angle": 0, + "content": "[13] Shuhao Han, Haotian Fan, Fangyuan Kong, Wenjie Liao, Chunle Guo, Chongyi Li, Radu Timofte, et al. NTIRE 2025 challenge on text to image generation model quality assessment. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.418, + 0.483, + 0.473 + ], + "angle": 0, + "content": "[14] Xuanhua He, Ke Cao, Jie Zhang, Keyu Yan, Yingying Wang, Rui Li, Chengjun Xie, Danfeng Hong, and Man Zhou. Panmamba: Effective pan-sharpening with state space model. Information Fusion, 115:102779, 2025. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.476, + 0.483, + 0.531 + ], + "angle": 0, + "content": "[15] Yuhuang Hu, Shih-Chii Liu, and Tobi Delbruck. v2e: From video frames to realistic dvs events. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1312-1321, 2021. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.533, + 0.483, + 0.615 + ], + "angle": 0, + "content": "[16] Varun Jain, Zongwei Wu, Quan Zou, Louis Florentin, Henrik Turbell, Sandeep Siddhartha, Radu Timofte, et al. NTIRE 2025 challenge on video quality enhancement for video conferencing: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.617, + 0.483, + 0.658 + ], + "angle": 0, + "content": "[17] J Kim, D K Ghosh, and Y J Jung. Event-based video deblurring based on image and event feature fusion. Expert Systems with Applications, 223:119917, 2023. 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.66, + 0.483, + 0.729 + ], + "angle": 0, + "content": "[18] Taewoo Kim, Hoonhee Cho, and Kuk-Jin Yoon. 
Frequency-aware event-based video deblurring for real-world motion blur. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24966-24976, 2024. 9, 10"
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.093,
+ 0.731,
+ 0.483,
+ 0.814
+ ],
+ "angle": 0,
+ "content": "[19] Sangmin Lee, Eunpil Park, Angel Canelo, Hyunhee Park, Youngjo Kim, Hyungju Chun, Xin Jin, Chongyi Li, Chun-Le Guo, Radu Timofte, et al. NTIRE 2025 challenge on efficient burst HDR and restoration: Datasets, methods, and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2"
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.093,
+ 0.817,
+ 0.483,
+ 0.871
+ ],
+ "angle": 0,
+ "content": "[20] Huan Li, Hailong Shi, and Xingyu Gao. A coarse-to-fine fusion network for event-based image deblurring. In Proceedings of the International Joint Conference on Artificial Intelligence, pages 974-982, 2024. 9, 10"
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.093,
+ 0.873,
+ 0.483,
+ 0.902
+ ],
+ "angle": 0,
+ "content": "[21] Xin Li, Yeying Jin, Xin Jin, Zongwei Wu, Bingchen Li, Yufei Wang, Wenhan Yang, Yu Li, Zhibo Chen, Bihan Wen, Robby"
+ },
+ {
+ "type": "list",
+ "bbox": [
+ 0.093,
+ 0.092,
+ 0.483,
+ 0.902
+ ],
+ "angle": 0,
+ "content": null
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.545,
+ 0.092,
+ 0.905,
+ 0.162
+ ],
+ "angle": 0,
+ "content": "Tan, Radu Timofte, et al. NTIRE 2025 challenge on day and night raindrop removal for dual-focused images: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2"
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.517,
+ 0.163,
+ 0.905,
+ 0.258
+ ],
+ "angle": 0,
+ "content": "[22] Xin Li, Xijun Wang, Bingchen Li, Kun Yuan, Yizhen Shao, Suhang Yao, Ming Sun, Chao Zhou, Radu Timofte, and Zhibo Chen. NTIRE 2025 challenge on short-form UGC video quality assessment and enhancement: KwaiSR dataset and study. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2"
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.517,
+ 0.26,
+ 0.905,
+ 0.356
+ ],
+ "angle": 0,
+ "content": "[23] Xin Li, Kun Yuan, Bingchen Li, Fengbin Guan, Yizhen Shao, Zihao Yu, Xijun Wang, Yiting Lu, Wei Luo, Suhang Yao, Ming Sun, Chao Zhou, Zhibo Chen, Radu Timofte, et al. NTIRE 2025 challenge on short-form UGC video quality assessment and enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2"
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.517,
+ 0.358,
+ 0.905,
+ 0.424
+ ],
+ "angle": 0,
+ "content": "[24] Jingyun Liang, Jiezhang Cao, Guolei Sun, Kai Zhang, Luc Van Gool, and Radu Timofte. SwinIR: Image restoration using swin transformer. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1833-1844, 2021. 5"
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.517,
+ 0.427,
+ 0.905,
+ 0.509
+ ],
+ "angle": 0,
+ "content": "[25] Jie Liang, Radu Timofte, Qiaosi Yi, Zhengqiang Zhang, Shuaizheng Liu, Lingchen Sun, Rongyuan Wu, Xindong Zhang, Hui Zeng, Lei Zhang, et al. NTIRE 2025 the 2nd restore any image model (RAIM) in the wild challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025.
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.511, + 0.905, + 0.593 + ], + "angle": 0, + "content": "[26] Kean Liu, Mingchen Zhong, Senyan Xu, Zhijing Sun, Jiaying Zhu, Chengjie Ge, Xin Lu, Xingbo Wang, Xueyang Fu, and Zheng-Jun Zha. Event-conditioned dual-modal fusion for motion deblurring. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.595, + 0.905, + 0.663 + ], + "angle": 0, + "content": "[27] Xiaohong Liu, Xiongkuo Min, Qiang Hu, Xiaoyun Zhang, Jie Guo, et al. NTIRE 2025 XGC quality assessment challenge: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.665, + 0.905, + 0.747 + ], + "angle": 0, + "content": "[28] Xiaoning Liu, Zongwei Wu, Florin-Alexandru Vasluianu, Hailong Yan, Bin Ren, Yulun Zhang, Shuhang Gu, Le Zhang, Ce Zhu, Radu Timofte, et al. NTIRE 2025 challenge on low light image enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.748, + 0.905, + 0.788 + ], + "angle": 0, + "content": "[29] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2019. 7, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.79, + 0.905, + 0.831 + ], + "angle": 0, + "content": "[30] Xingyu Lu, Lei Sun, Diyang Gu, and Kaiwei Wang. Sge: structured light system based on gray code with an event camera. Optics Express, 32(26):46044-46061, 2024. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.832, + 0.905, + 0.9 + ], + "angle": 0, + "content": "[31] Xintian Mao, Qingli Li, and Yan Wang. Adarevd: Adaptive patch exiting reversible decoder pushes the limit of image deblurring. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 25681-25690, 2024. 10" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.092, + 0.905, + 0.9 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.176 + ], + "angle": 0, + "content": "[32] Nico Messikommer, Stamatos Georgoulis, Daniel Gehrig, Stepan Tulyakov, Julius Erbach, Alfredo Bochicchio, Yuanyou Li, and Davide Scaramuzza. Multi-bracket high dynamic range imaging with event cameras. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 547-557, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.178, + 0.482, + 0.247 + ], + "angle": 0, + "content": "[33] Nico Messikommer, Carter Fang, Mathias Gehrig, and Davide Scaramuzza. Data-driven feature tracking for event cameras. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5642-5651, 2023. 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.249, + 0.482, + 0.303 + ], + "angle": 0, + "content": "[34] Manasi Muglikar, Guillermo Gallego, and Davide Scaramuzza. Esl: Event-based structured light. In 2021 International Conference on 3D Vision (3DV), pages 1165-1174. IEEE, 2021. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.307, + 0.482, + 0.389 + ], + "angle": 0, + "content": "[35] Seungjun Nah, Sungyong Baik, Seokil Hong, Gyeongsik Moon, Sanghyun Son, Radu Timofte, and Kyoung Mu Lee. 
Ntire 2019 challenge on video deblurring and superresolution: Dataset and study. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition workshops, pages 1996-2005, 2019. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.391, + 0.482, + 0.461 + ], + "angle": 0, + "content": "[36] Bin Ren, Hang Guo, Lei Sun, Zongwei Wu, Radu Timofte, Yawei Li, et al. The tenth NTIRE 2025 efficient superresolution challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.462, + 0.482, + 0.545 + ], + "angle": 0, + "content": "[37] Nickolay Safonov, Alexey Bryntsev, Andrey Moskalenko, Dmitry Kulikov, Dmitriy Vatolin, Radu Timofte, et al. NTIRE 2025 challenge on UGC video enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.548, + 0.482, + 0.616 + ], + "angle": 0, + "content": "[38] Timo Stoffregen, Cedric Scheerlinck, Davide Scaramuzza, Tom Drummond, Nick Barnes, Lindsay Kleeman, and Robert Mahony. Reducing the sim-to-real gap for event cameras. In European Conference on Computer Vision, pages 534-549, 2020. 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.618, + 0.482, + 0.701 + ], + "angle": 0, + "content": "[39] Lei Sun, Christos Sakaridis, Jingyun Liang, Qi Jiang, Kailun Yang, Peng Sun, Yaozu Ye, Kaiwei Wang, and Luc Van Gool. Event-based fusion for motion deblurring with cross-modal attention. In European Conference on Computer Vision, pages 412-428. Springer, 2022. 1, 3, 4, 6, 7, 8, 9, 10, 11, 12" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.704, + 0.482, + 0.786 + ], + "angle": 0, + "content": "[40] Lei Sun, Christos Sakaridis, Jingyun Liang, Peng Sun, Jiezhang Cao, Kai Zhang, Qi Jiang, Kaiwei Wang, and Luc Van Gool. Event-based frame interpolation with ad-hoc deblurring. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18043-18052, 2023. 2, 3, 4, 6, 7, 8, 9, 10" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.789, + 0.482, + 0.871 + ], + "angle": 0, + "content": "[41] Lei Sun, Daniel Gehrig, Christos Sakaridis, Mathias Gehrig, Jingyun Liang, Peng Sun, Zhijie Xu, Kaiwei Wang, Luc Van Gool, and Davide Scaramuzza. A unified framework for event-based frame interpolation with ad-hoc deblurring in the wild. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.873, + 0.482, + 0.901 + ], + "angle": 0, + "content": "[42] Lei Sun, Andrea Alfarano, Peiqi Duan, Shaolin Su, Kaiwei Wang, Boxin Shi, Radu Timofte, Danda Pani Paudel, Luc" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.092, + 0.908, + 0.149 + ], + "angle": 0, + "content": "Van Gool, et al. NTIRE 2025 challenge on event-based image deblurring: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.151, + 0.906, + 0.218 + ], + "angle": 0, + "content": "[43] Lei Sun, Yuhan Bao, Jiajun Zhai, Jingyun Liang, Yu lun Zhang, Kaiwei Wang, Danda Pani Paudel, and Luc Van Gool. Low-light image enhancement using event-based illumination estimation. 
arXiv preprint arXiv:2504.09379, 2025. 1"
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.517,
+ 0.221,
+ 0.906,
+ 0.29
+ ],
+ "angle": 0,
+ "content": "[44] Lei Sun, Hang Guo, Bin Ren, Luc Van Gool, Radu Timofte, Yawei Li, et al. The tenth NTIRE 2025 image denoising challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2"
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.517,
+ 0.292,
+ 0.906,
+ 0.348
+ ],
+ "angle": 0,
+ "content": "[45] Zhijing Sun, Xueyang Fu, Longzhuo Huang, Aiping Liu, and Zheng-Jun Zha. Motion aware event representation-driven image deblurring. In European Conference on Computer Vision, pages 418-435. Springer, 2024. 4"
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.517,
+ 0.349,
+ 0.906,
+ 0.419
+ ],
+ "angle": 0,
+ "content": "[46] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Caitian Chen, Zongwei Wu, Radu Timofte, et al. NTIRE 2025 image shadow removal challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2"
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.517,
+ 0.42,
+ 0.908,
+ 0.49
+ ],
+ "angle": 0,
+ "content": "[47] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Zongwei Wu, Radu Timofte, et al. NTIRE 2025 ambient lighting normalization challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2"
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.517,
+ 0.491,
+ 0.906,
+ 0.574
+ ],
+ "angle": 0,
+ "content": "[48] Yingqian Wang, Zhengyu Liang, Fengyuan Zhang, Lvli Tian, Longguang Wang, Juncheng Li, Jungang Yang, Radu Timofte, Yulan Guo, et al. NTIRE 2025 challenge on light field image super-resolution: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2"
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.517,
+ 0.576,
+ 0.906,
+ 0.631
+ ],
+ "angle": 0,
+ "content": "[49] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE Transactions on Image Processing, 13(4):600-612, 2004. 5"
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.517,
+ 0.633,
+ 0.906,
+ 0.689
+ ],
+ "angle": 0,
+ "content": "[50] Wenming Weng, Yueyi Zhang, and Zhiwei Xiong. Event-based blurry frame interpolation under blind exposure. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1588-1598, 2023. 6"
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.517,
+ 0.69,
+ 0.908,
+ 0.787
+ ],
+ "angle": 0,
+ "content": "[51] Kangning Yang, Jie Cai, Ling Ouyang, Florin-Alexandru Vasluianu, Radu Timofte, Jiaming Ding, Huiming Sun, Lan Fu, Jinlong Li, Chiu Man Ho, Zibo Meng, et al. NTIRE 2025 challenge on single image reflection removal in the wild: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2"
+ },
+ {
+ "type": "ref_text",
+ "bbox": [
+ 0.517,
+ 0.789,
+ 0.906,
+ 0.844
+ ],
+ "angle": 0,
+ "content": "[52] Wen Yang, Jinjian Wu, Jupo Ma, Leida Li, and Guangming Shi. Motion deblurring via spatial-temporal collaboration of frames and events. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 6531-6539, 2024.
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.846, + 0.906, + 0.901 + ], + "angle": 0, + "content": "[53] Pierluigi Zama Ramirez, Fabio Tosi, Luigi Di Stefano, Radu Timofte, Alex Costanzino, Matteo Poggi, Samuele Salti, Stefano Mattoccia, et al. NTIRE 2025 challenge on hr depth from images of specular and transparent surfaces. In Pro" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.092, + 0.908, + 0.901 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.092, + 0.482, + 0.12 + ], + "angle": 0, + "content": "ceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.122, + 0.483, + 0.191 + ], + "angle": 0, + "content": "[54] Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, Ming-Hsuan Yang, and Ling Shao. Learning enriched features for real image restoration and enhancement. In European Conference on Computer Vision, pages 492-511. Springer, 2020. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.192, + 0.482, + 0.274 + ], + "angle": 0, + "content": "[55] Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, and Ming-Hsuan Yang. Restormer: Efficient transformer for high-resolution image restoration. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5728-5739, 2022. 4, 5, 8, 9" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.277, + 0.482, + 0.317 + ], + "angle": 0, + "content": "[56] Shaobo Zhang, Lei Sun, and Kaiwei Wang. A multi-scale recurrent framework for motion segmentation with event camera. IEEE Access, 11:80105-80114, 2023. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.319, + 0.482, + 0.374 + ], + "angle": 0, + "content": "[57] Yulun Zhang, Yapeng Tian, Yu Kong, Bineng Zhong, and Yun Fu. Residual dense network for image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2472-2481, 2018. 
7" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.483, + 0.374 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12401/344c7717-406a-4426-bbde-928913ffd40c_origin.pdf b/data/2025/2504_12xxx/2504.12401/344c7717-406a-4426-bbde-928913ffd40c_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d557f0e63aad1a843e25cdbf7d839c1d27b7c585 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/344c7717-406a-4426-bbde-928913ffd40c_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2312ac3dc90dc97a2292cef83b4cc0a2e8fb2c47cda308dd258ae3201780265 +size 9904152 diff --git a/data/2025/2504_12xxx/2504.12401/full.md b/data/2025/2504_12xxx/2504.12401/full.md new file mode 100644 index 0000000000000000000000000000000000000000..1836b6a9f13acb158ae88f109a33b44bac745e1a --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/full.md @@ -0,0 +1,858 @@ +# NTIRE 2025 Challenge on Event-Based Image Deblurring: Methods and Results + +Lei Sun* + +Boxin Shi* + +Andrea Alfarano* + +Radu Timofte* + +Peiqi Duan* + +Danda Pani Paudel* + +Shaolin Su* + +Luc Van Gool* + +Kaiwei Wang* + +Qinglin Liu + +Wei Yu + +Xiaogian Lv + +Lu Yang + +Shuigen Wang + +Shengping Zhang + +Xiangyang Ji + +Long Bao + +Yuqiang + +Jinao Song + +Ziyi Wang + +Shuang Wen + +Heng Sun + +Kean Liu + +Mingchen Zhong + +Senyan Xu + +Zhijing Sun + +Jiaying Zhu + +Chengjie 6 + +Xingbo Wang + +Yidi Liu + +Xueyang Fu + +Zheng-Jun Zha + +Dawei Fan + +Dafeng Zhang + +Yong Yang + +Siru Zhang + +Qinghua Yang + +Hao Kang + +Huiyuan Fu + +Heng Zhang + +Hongyuan + +Zhijuan Huang + +Shuoyan Wei + +Feng Li + +Runmin Cong + +Weiqi Luo + +Mingyun Lin + +Chenxu Jiang + +Hongyi Liu Lei Yu + +Weilun Li + +Jiajun Zhai + +ngting Lin + +Shuang Ma + +Sai Zhou + +Zhanwen Liu + +Yang Wang + +Eiffel Chong + +Nuwan Bandara + +Thivya Kandappu + +Archan Misra + +Yihang Chen + +Zhan Li + +Weijun Yuan + +Wenzhuo Wang + +Boyang Yao + +Zhanglu Chen + +Yijing Sun + +Tianjiao Wan + +Zijian Gao + +Qisheng Xu + +Kele Xu + +Yukun Zhang + +Yu He + +Xiaoyan Xie + +Tao Fu + +Yashu Gautamkumar Patel + +Vihar Ramesh Jain + +Divesh Basina + +Rishik Ashili + +Manish Kumar Manjhi + +Sourav Kumar + +Prinon Benny + +Himanshu Ghunawat + +B Sri Sairam Gautam + +Anett Varghese + +Abhishek Yadav + +# Abstract + +This paper presents an overview of NTIRE 2025 the First Challenge on Event-Based Image Deblurring, detailing the proposed methodologies and corresponding results. The primary goal of the challenge is to design an event-based method that achieves high-quality image deblurring, with performance quantitatively assessed using Peak Signal-to-Noise Ratio (PSNR). Notably, there are no restrictions on computational complexity or model size. The task focuses on leveraging both events and images as inputs for single-image deblurring. A total of 199 participants registered, among whom 15 teams successfully submitted valid results, offering valuable insights into the current state of event-based image deblurring. We anticipate that this challenge will drive further advancements in event-based vision research. + +# 1. Introduction + +Traditional camera output frames with relatively long exposure time in a fixed framerate. 
In contrast, event cameras, a class of neuromorphic sensors, asynchronously capture pixelwise intensity changes with high temporal resolution [12], and have been applied in various fields such as computational imaging [32, 39-41, 43], human pose estimation [2], depth estimation [30, 34], image segmentation [1, 56], etc.

In recent years, significant efforts have been dedicated to event-based image restoration. Among various tasks, event-based image deblurring has gained the most attention, as the high temporal resolution of event cameras provides valuable priors for motion deblurring [39-41]. Notably, these methods operate under the assumption that input images and events are spatially aligned, a condition that applies to all approaches discussed in this paper.

In conjunction with the NTIRE 2025 Workshop on New Trends in Image Restoration and Enhancement, the Event-Based Image Deblurring Challenge was organized. The objective is to develop a network architecture or solution that effectively integrates events and images to enhance image deblurring performance. We hope that this challenge will serve as a starting point for promoting event-based image enhancement on a broader stage and contribute to the thriving development of the event-based vision community.

This challenge is one of the NTIRE 2025 Workshop associated challenges on: ambient lighting normalization [47], reflection removal in the wild [51], shadow removal [46], event-based image deblurring [42], image denoising [44], XGC quality assessment [27], UGC video enhancement [37], night photography rendering [10], image super-resolution (x4) [4], real-world face restoration [5], efficient super-resolution [36], HR depth estimation [53], efficient burst HDR and restoration [19], cross-domain few-shot object detection [11], short-form UGC video quality assessment and enhancement [22, 23], text to image generation model quality assessment [13], day and night raindrop removal for dual-focused images [21], video quality assessment for video conferencing [16], low light image enhancement [28], light field super-resolution [48], restore any image model (RAIM) in the wild [25], raw restoration and super-resolution [7] and raw reconstruction from RGB on smartphones [8].

# 2. NTIRE 2025 Event-Based Image Deblurring Challenge

The goals of this challenge include: (1) promoting research in the area of event-based image deblurring, (2) facilitating comparisons between various methods, and (3) providing a platform for academic and industrial participants to engage, discuss, and potentially establish collaborations. This section details the specifics of the challenge, including the dataset, challenge phases, and evaluation criteria.

# 2.1. Dataset

The HighREV dataset [40] is used for both training and evaluation in this challenge. It consists of 1,771 sets of blurry images, corresponding events, and sharp images for training. Additionally, 421 sets are provided as validation data during the development phase, ensuring a comprehensive benchmark for assessing model performance.

# 2.2. Tracks and Competition

The aim is to obtain a network design capable of producing high-quality results with the best performance, measured by PSNR, for event-based image deblurring.

Challenge phases. Participants were given access to training images from the HighREV dataset. During the validation phase, they could use 421 images from the validation set for model tuning. In the test phase, evaluation was performed on 271 images from the test set.
To ensure a fair assessment, the ground-truth images for the test phase remained hidden from participants throughout the challenge. + +
| Team | Rank | PSNR (primary) | SSIM |
| :-- | :--: | :--: | :--: |
| IVISLAB | 1 | 42.79 | 0.9196 |
| MiVideoDeblur | 2 | 42.70 | 0.9281 |
| 404NotFound | 3 | 42.09 | 0.9300 |
| Give_it_a_try | 4 | 40.37 | 0.9234 |
| BUPTMM | 5 | 40.21 | 0.9179 |
| WEI | 6 | 39.46 | 0.9171 |
| DVS-WHU | 7 | 39.26 | 0.9101 |
| PixelRevive | 8 | 39.12 | 0.9112 |
| CHD | 9 | 38.56 | 0.9055 |
| SMU | 10 | 38.30 | 0.9047 |
| JNU620 | 11 | 37.63 | 0.9019 |
| colab | 12 | 36.84 | 0.8962 |
| CMSL | 13 | 31.81 | 0.8900 |
| KUnet | 14 | 29.42 | 0.8600 |
| Group10 | 15 | 25.93 | 0.8200 |

Table 1. Results of the NTIRE 2025 Event-Based Image Deblurring Challenge. PSNR and SSIM scores are measured on the 271 test images from the HighREV dataset. Team rankings are based primarily on PSNR.

Evaluation protocol. Since the aim of this challenge is to foster the development of accurate event-based image deblurring networks, PSNR and SSIM on the 271 testing images are used as the quantitative evaluation metrics. A code example for calculating these metrics is available at https://github.com/AHupuJR/NTIRE2025_EventDeblurChallenge. The code of the submitted solutions and the pretrained weights are also available in this repository.

# 3. Challenge Results

Table 1 shows the final rankings and test results of the participating teams. The implementation details of each team can be found in Sec. 4, while team member information can be found in Appendix A. IVISLAB achieved first place in terms of PSNR, followed by MiVideoDeblur and 404NotFound in second and third place, respectively.

# 3.1. Participants

The challenge attracted 199 registered participants, with 15 teams successfully submitting valid results.

# 3.2. Main Ideas and Architectures

Throughout the challenge, participants explored various innovative techniques to improve deblurring performance. Below, we summarize some of the key strategies employed by the top-performing teams.

1. Hybrid architectures demonstrated strong performance, with all top-3 teams utilizing a combination of transformers and convolutional networks. This approach leverages global features extracted by transformers alongside local features captured by convolutional layers, both of which contribute to effective event-based image deblurring. In addition, both spatial and channel attention mechanisms play a crucial role in enhancing overall performance.
2. Pretrained weights matter. The winning team, IVISLAB, leveraged a backbone model initialized with pretrained weights from ImageNet, demonstrating the advantages of transfer learning in event-based image deblurring.
3. Cross-modal fusion proves beneficial. Several teams adopted EFNet [39] and REFID [40, 41] as baseline models to fuse features from the event and image branches.
4. Effective training strategies. Both the second- and third-place teams employed progressive learning techniques during training. Additionally, the winning team utilized a large patch size $(512 \times 512)$, which contributed to improved performance.
5. Incorporating a novel Mamba-based architecture. Integrating features from both image and event modalities is crucial for enhancing the reconstruction quality of event-based deblurring methods. Team DVS-WHU introduced an innovative Mamba-based architecture to achieve more effective fusion.

# 3.3. Fairness

To maintain fairness in the event-based image deblurring challenge, specific rules were implemented, primarily regarding the datasets used for training. Participants were permitted to use external datasets for training. However, incorporating the HighREV validation set, whether sharp or blurry images, was strictly prohibited, as this set served to evaluate the overall performance and generalizability of the models. Additionally, the use of HighREV test blurry images for training was not allowed. On the other hand, employing advanced data augmentation techniques during training was considered an acceptable practice.
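For concreteness, the following is a minimal sketch of the PSNR/SSIM evaluation protocol described above, written with scikit-image. The authoritative implementation is the metric code in the linked challenge repository; the function name and image-loading conventions here are illustrative only.

```python
import numpy as np
from skimage.metrics import peak_signal_noise_ratio, structural_similarity

def score_pair(restored: np.ndarray, sharp: np.ndarray) -> tuple[float, float]:
    """Both images: (H, W, 3) uint8 arrays; returns (PSNR, SSIM)."""
    psnr = peak_signal_noise_ratio(sharp, restored, data_range=255)
    ssim = structural_similarity(sharp, restored, channel_axis=-1, data_range=255)
    return psnr, ssim

# Challenge-style aggregation: average over all 271 test pairs,
# ranking primarily by mean PSNR.
```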
# 4. Challenge Methods and Teams

# 4.1. IVISLAB

To achieve image deblurring, team IVISLAB introduces the Triple Event-stream Image Deblurring Network (TEIDNet). As depicted in Figure 1, TEIDNet converts consecutive events into event voxels at three temporal scales to perceive motion information from blurred images and capture fine edges for reconstructing clear images. Furthermore, TEIDNet integrates Shift Window Attention and Channel-Wise Attention blocks to capture local and global contexts, thereby enhancing deblurring accuracy.

# 4.1.1. Network Architecture

![](images/c1b557d47e8506e24eb01ecd1ef1e495035f8ebd33e86e59e2ddd403108412fb.jpg)
Figure 1. The model architecture of TEIDNet, proposed by Team IVISLAB.

TEIDNet adopts an encoder-decoder architecture to process images and triple-stream event voxels, aiming to estimate the deblurred image. Specifically, when deblurring the image at frame $t$, TEIDNet considers that the long-term event stream surrounding frame $t$ can aid in motion perception. Therefore, it voxelizes the event data from frame $t - T_{l}$ to frame $t + T_{l}$ into a $b$-bin event voxel $V_{l,t}$. Simultaneously, since the short-term event stream around frame $t$ can help reconstruct high-frequency textures, TEIDNet voxelizes the event data from frame $t - T_{s}$ to frame $t + T_{s}$ into a $b$-bin event voxel $V_{s,t}$. Furthermore, to mitigate color artifacts by leveraging higher-resolution motion information near the current frame, TEIDNet voxelizes the event data from frame $t - T_{m}$ to frame $t + T_{m}$ into a $b$-bin event voxel $V_{m,t}$. Subsequently, the event voxels $V_{l,t}$, $V_{s,t}$, and $V_{m,t}$, along with the blurred image $I_{b}$, are concatenated and fed into the network. To effectively fuse the features from the image and event voxels, TEIDNet employs convolutional layers to generate fused feature representations. The network then utilizes a dual-branch encoder. The first, a complex branch, extracts high-level semantic information from the fused features by leveraging shift window attention to capture local context and channel-wise attention blocks to capture global context. The second, a simple branch, utilizes convolutional layers to capture fine-grained details from the fused features. Next, TEIDNet's decoder integrates multiple shift window attention blocks to fuse and upsample the features extracted by the dual-branch encoder. Finally, convolutional layers are employed to predict the deblurred image $I_{t}$.

# 4.1.2. Loss Function

To train TEIDNet, they define a reconstruction loss $\mathcal{L}_r$ for the estimated deblurred image $I_{t}$ as follows:

$$
\mathcal{L}_r = \lambda_1 \mathrm{L}_1\left(I_t, I_t^{gt}\right) + \lambda_2 \mathrm{L}_2\left(I_t, I_t^{gt}\right) \tag{1}
$$

Here, $\lambda_{1}$ and $\lambda_{2}$ are coefficients that balance the loss terms. The function $\mathrm{L}_1(\cdot,\cdot)$ represents the mean absolute error, while $\mathrm{L}_2(\cdot,\cdot)$ denotes the mean squared error. The term $I_t^{gt}$ refers to the ground truth image at frame $t$.
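Eq. (1) translates directly into a few lines of PyTorch. The sketch below is an illustration of that weighted L1 + L2 combination, not IVISLAB's released code.

```python
import torch.nn.functional as F

def reconstruction_loss(pred, gt, lambda1: float = 1.0, lambda2: float = 1.0):
    # Eq. (1): weighted sum of mean absolute error (L1) and mean squared
    # error (L2); the report uses lambda1 = lambda2 = 1.
    return lambda1 * F.l1_loss(pred, gt) + lambda2 * F.mse_loss(pred, gt)
```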
# 4.1.3. Implementation Details

TEIDNet is implemented using PyTorch on four Nvidia L20 GPUs. During training, a batch size of 16 is utilized, with input data dimensions of $512 \times 512$ pixels. The network weights are optimized over 1000 epochs using the AdamW optimizer, with an initial learning rate set to $2 \times 10^{-5}$. A cosine annealing scheduler is employed to decay the learning rate progressively. In addition, they select the best-performing checkpoint and perform a second fine-tuning pass. To mitigate overfitting, data augmentation techniques such as random flipping and rotation are applied. They also initialize the backbone network parameters using weights pretrained on ImageNet. The specific coefficients and parameters are defined as follows: number of bins $b = 7$, long-term temporal window $T_{l} = 5$, medium-term temporal window $T_{m} = 1$, short-term temporal window $T_{s} = 0$, and loss function weights $\lambda_{1} = 1$, $\lambda_{2} = 1$.

# 4.2. MiVideoDeblur

![](images/8611a01662b0ce6655ccee7173684d1e4b03e2302ec0f1864c85d316a26da03d.jpg)
Figure 2. The framework of DASTF-Net, proposed by Team MiVideoDeblur.

Introduction. As illustrated in Fig. 2, their team proposed the Dual Attention Spatio-Temporal Fusion Network (DASTF-Net). Motivated by EFNet [39], their model employs a two-stage encoder-decoder architecture. Initially, two encoders separately extract multi-scale features from both the image and event data. Based on the EGACA module [40] and the FAF module [45], they have designed the Temporal Fusion Residual Block (TFRB) and the Multi-Scale Cross-Attention Fusion Block (MSCAFB), which perform feature fusion in the temporal and spatial dimensions, respectively. By incorporating a dual-attention mechanism, these modules effectively enhance the model's performance. Following feature fusion, the fused features are fed into a Restormer [55], which further leverages the feature information to improve the model's performance.

Training strategy. They employed a four-stage training strategy. In the first stage, the network was trained for 160k iterations using the PSNR loss function. The AdamW optimizer was used, with an initial learning rate of 2e-4 and a cosine annealing learning rate schedule for updates. Subsequently, in the second stage, data augmentation techniques were introduced, which included adding random Gaussian noise and applying random scaling to the input data. Building upon the model from the first stage, the training continued for 80k iterations with an initial learning rate of 1e-4. For the third and fourth stages, the patch size was progressively increased from 256 to 320 and then to 480. The network was trained for 40k iterations in the third stage and 45k iterations in the fourth stage.
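The AdamW-plus-cosine-annealing recipe recurs across several submissions (Secs. 4.1.3 and 4.2, among others). Below is a minimal sketch of that recipe; the model is a hypothetical stand-in for a deblurring network, and the hyperparameters follow the first DASTF-Net stage.

```python
import torch

model = torch.nn.Conv2d(3, 3, 3, padding=1)   # hypothetical stand-in network
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-4)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
    optimizer, T_max=160_000, eta_min=1e-7)    # decay over the 160k-iteration stage

for step in range(160_000):
    # ... forward pass, PSNR loss, loss.backward() ...
    optimizer.step()
    optimizer.zero_grad()
    scheduler.step()
```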
# 4.3. 404NotFound

Their team proposes EV-Deblurformer [26], a framework consisting of two complementary models designed to fully leverage the temporal dynamics of video sequences and the rich texture details present in single images. The framework includes two distinct components: Video-SFHformer, developed for video-based deblurring, and EFSformer, tailored for single-image deblurring. In Video-SFHformer, they introduce STFBlock to enhance the model's capacity for long-range temporal modeling. In EFSformer, they incorporate STEFusionBlock, which fuses event features from the frequency domain to improve spatial detail restoration. To achieve optimal performance, as shown in Section 4.3.3, a sequence-level ensemble strategy is employed to merge the outputs of both models. A progressive training scheme is also adopted to enhance robustness and effectiveness.

# 4.3.1. Overall Pipeline

Figure 3 illustrates the overall architecture of their proposed method, EV-Deblurformer. This approach, built upon the two models Video-SFHformer and EFSformer, fully exploits the rich temporal dynamics and sharp edge information provided by event data. For the video deblurring model, they propose the Video-SFHformer based on SFHformer. For the single-image motion deblurring model, they propose the EFSformer built on EFNet [39].

![](images/9f72a3b5343b2954180c60b89da07dea92ecc4de71c2198cbf209ef89d3a6ac9.jpg)

![](images/d4312036562aa983d2f712981c79dd05fea8c0c2fd66cd31016a474e139d5a33.jpg)

![](images/a3e30ae4e1827997e27cf7621f6fffe30946062565c228e2edb467ce551cb086.jpg)
Figure 3. The architecture diagram of EV-Deblurformer, proposed by Team 404NotFound, is designed for event-guided motion deblurring.

# 4.3.2. Implementation Details

They implement their proposed network via the PyTorch 2.1.2 platform. The Adam optimizer with parameters $\beta_{1} = 0.9$ and $\beta_{2} = 0.999$ is adopted to optimize their network. Motivated by [55], they introduce a progressive training strategy. The training phase of their network can be divided into two stages:

(1) Initial training of EV-Deblurformer. They use a progressive training strategy at first (see the sketch after this list). For the video-based motion deblurring model, they start training with a patch size of $152 \times 152$ and a batch size of 16 for 250K iterations. The patch size and batch size pairs are updated to $[(192^2, 12), (256^2, 8), (304^2, 8)]$ at iterations [250K, 200K, 150K]. The initial learning rate is $2 \times 10^{-4}$ and remains unchanged when the patch size is 192. Later, the learning rate is set to $1 \times 10^{-4}$ and $7 \times 10^{-5}$ for patch and batch size pairs of $(256^2, 8)$ and $(304^2, 8)$, respectively. They employ a cosine annealing learning rate decay strategy, gradually reducing the learning rate. For the single-image-based motion deblurring model, they begin training with a patch size of $192 \times 192$ and a batch size of 12 for 250K iterations. During training, patch size and batch size pairs are progressively updated to $(256^{2}, 10)$, $(288^{2}, 8)$, and $(320^{2}, 8)$ at 36K, 24K, and 24K iterations, respectively. The initial learning rate is set to $5 \times 10^{-4}$, and later adjusted to $1 \times 10^{-4}$, $7 \times 10^{-5}$, and $5 \times 10^{-5}$ corresponding to the updated patch and batch size configurations. A cosine annealing schedule is employed to gradually decay the learning rate throughout the training process. The first stage is performed on an NVIDIA RTX 4090 GPU. They take the best model at this stage as the initialization of the second stage.

(2) Fine-tuning EV-Deblurformer. For the video-based motion deblurring model, they start training with a patch size of $320 \times 320$ and a batch size of 4 for 150K iterations. The initial learning rate is set to $1 \times 10^{-5}$ and is adjusted to $1 \times 10^{-7}$ using a cosine annealing schedule, over a total of 150K iterations. They use the entire training data from the challenge without applying any data augmentation techniques. An exponential moving average (EMA) is employed for the dynamic adjustment of the model parameters. For the single-image-based motion deblurring model, they adopt the same training strategy as used in the video-based motion deblurring model. The second training stage is conducted on an NVIDIA RTX 4090 GPU.

(3) Evaluation Metrics. They utilize two widely adopted reference-based evaluation metrics, Peak Signal-to-Noise Ratio (PSNR) and Structural Similarity Index Measure (SSIM) [49], to evaluate the effectiveness of their method, following prior works [3, 24, 54, 55]. Higher PSNR and SSIM values generally reflect better performance in image restoration tasks.
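The progressive patch/batch schedule in stage (1) can be encoded as a simple lookup. The sketch below uses the video-model values and assumes the bracketed iteration numbers are per-phase durations, which the report leaves slightly ambiguous.

```python
# (iterations, patch_size, batch_size, learning_rate): video model, stage (1)
PHASES = [
    (250_000, 152, 16, 2e-4),
    (250_000, 192, 12, 2e-4),
    (200_000, 256,  8, 1e-4),
    (150_000, 304,  8, 7e-5),
]

def phase_at(step: int):
    """Return (patch, batch, lr) for a global training step."""
    for iters, patch, batch, lr in PHASES:
        if step < iters:
            return patch, batch, lr
        step -= iters
    return PHASES[-1][1:]   # stay in the last phase once the schedule is exhausted
```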
# 4.3.3. Ensemble Strategies

Ensemble learning has been proven to be an effective technique in image restoration. Its most basic application involves integrating the outputs of multiple models and applying a fusion strategy to achieve results with better generalization and greater stability in restoration quality.

The HighREV-test dataset consists of four sequences. Among them, one is an outdoor scene, which differs markedly from the other three in terms of object diversity, texture richness, and color composition. Based on this observation, they explore a sequence-level ensemble strategy that selectively exchanges outputs between Video-SFHformer and EFSformer.

Specifically, they start with the best-performing Video-SFHformer model and replace the output of the outdoor sequence in the HighREV-test set with the corresponding result generated by EFSformer. The results in Table 1 show that their approach yields the best performance, achieving the highest SSIM score and ranking third overall in the NTIRE Event-Based Image Deblurring Challenge.

# 4.4. Give_it_a_try

# 4.4.1. General method

This submission is mainly based on the public code of another team. The models used in this submission are EFNet att track fusion and EFNet att track fusion new, which can be found under archs or archs/tested. They change the training strategy, fine-tune the models, and combine the two best models to push the limits of scoring.

- How the event modality is utilized in the deblurring process: They used the given SCER-format event voxels for training, validation, and testing. The usage is the same as in the original EFNet [39], since the new networks retain the encoder module of the baseline.

# 4.4.2. Implementation details

# - Training:

In the first stage of training, all models are trained for $2 \times 10^{5}$ iterations with a batch size of 16 using the PSNR loss function and the AdamW optimizer. In each training batch, the paired images and event voxels are randomly cropped to $256 \times 256$ and augmented by random flipping and rotation. The learning rate is initialized as $3 \times 10^{-4}$, and a cosine annealing scheduler is used to drop the final learning rate to $10^{-7}$. They fine-tuned the models obtained from the first stage with a patch size of $512 \times 512$. At this stage, all models are trained for another $2 \times 10^{5}$ iterations with a batch size of 4, and the learning rate drops from $2 \times 10^{-5}$ to $10^{-6}$. Models are validated every $10^{4}$ iterations. Other settings remain unchanged.

# - Validating and Testing:

They chose the highest-validated model for each network during the fine-tuning stage and average the two models' outputs as the final result to improve robustness.
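Output averaging of this kind is easy to make concrete. In the minimal sketch below, the two models are hypothetical stand-ins for the two fine-tuned EFNet variants.

```python
import torch

@torch.no_grad()
def ensemble_predict(model_a, model_b, blur, voxel):
    """Average the restored images from two checkpoints (equal weights)."""
    return 0.5 * (model_a(blur, voxel) + model_b(blur, voxel))
```

A weighted variant of the same idea appears in Team BUPTMM's pipeline below (Fig. 4), which uses $\alpha = 0.6$ and $\beta = 0.4$ instead of equal weights.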
# 4.5. BUPTMM

# 4.5.1. Architecture

Their solution is built on EFNet [39] and STCNet [52]. Inspired by [50], they introduce a detail enhancement module that follows the EFNet prediction stage. The whole pipeline is illustrated in Fig. 4. The detail enhancement module adopts a simple U-Net structure.

![](images/53355ba7ec22077eb962d8f0e9c27950cccf02c59a1dd759a83c4d3ea57b20ea.jpg)
Figure 4. An overview of the method proposed by Team BUPTMM: they set the weights for the fusion with $\alpha$ set to 0.6 and $\beta$ to 0.4.

# 4.5.2. Implementation Details

Both EFNet and STCNet are initialized with pre-trained GoPro checkpoints. They fine-tune them separately using the NTIRE official training dataset without additional data, aside from the pre-trained GoPro weights. The patch size is set to $1024 \times 1024$, and they employ the CosineAnnealingLR scheduler to adjust the learning rate.

The key differences in the training strategies for EFNet and STCNet are as follows:

For EFNet, they train for 100k iterations with a batch size of 4 using 4 NVIDIA H800 GPUs. The optimizer is AdamW with an initial learning rate of 2e-4. They generate the event voxel grid following the official script, setting the bin size to 24. Due to differences in the event encoder's channel size, they extended the pre-trained GoPro checkpoint weights from 6 to 24 bins. The loss function consists of the L1 loss, the Charbonnier loss, and the Sobel loss, with respective weights of 1.0, 0.5, and 0.5. Unlike the official EFNet implementation, they do not apply a mask between the two stages.

For STCNet, they train for 1000 epochs with a batch size of 8 using 4 NVIDIA H800 GPUs. The optimizer is Adam with an initial learning rate of 2e-4. They use the official event voxel grid with a bin size of 6. The loss function is the Charbonnier loss.

# 4.6. WEI

Since REFID [40] is an excellent method for event-based blurry video frame interpolation (VFI), and considering the differences in modeling the image deblurring and VFI problems, they adapt the REFID structure to fit the image deblurring challenge. As shown in Fig. 5, they develop a Bi-directional Gathered Recurrent Network (BGRN) for event-based image deblurring.

# 4.6.1. Network Architecture

Following REFID [40], the events within the exposure time $(t - \Delta t \to t + \Delta t)$ are represented as a voxel grid $V_{t - \Delta t \rightarrow t + \Delta t} \in \mathbb{R}^{(M + 1) \times H \times W}$, where $M$ is set to 9. Furthermore, they divide the voxel $V_{t - \Delta t \rightarrow t + \Delta t}$ into two segments, $V_{t - \Delta t \rightarrow t}$ and $V_{t + \Delta t \rightarrow t}$, to perform forward and backward iterations, respectively.
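A sketch of this bi-directional split is given below. The exact bin boundaries are an assumption of the illustration; the report only specifies the $(M+1)$-bin voxel with $M = 9$.

```python
import torch

def split_voxel(voxel: torch.Tensor):
    """voxel: (M+1, H, W) spanning t - dt .. t + dt, oldest bin first."""
    mid = voxel.shape[0] // 2
    forward = voxel[: mid + 1]       # t - dt -> t, in temporal order
    backward = voxel[mid:].flip(0)   # t + dt -> t, reversed toward t
    return forward, backward
```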
The BGRN consists of image and event branches. Only a blurry image $B_{t}$ is fed into the image branch, and the network output is the corresponding sharp image $\hat{I}_t$.

![](images/9f8be6c650b99795b0cdb6d83e07801f8311e1e0282f1bf2309fd93b92bb1313.jpg)
Figure 5. The architecture of the Bi-directional Gathered Recurrent Network (BGRN), proposed by Team WEI, is designed for event-based image deblurring and serves as an enhanced reconfiguration network for REFID [40]. "EVR Block": event recurrent block [40], "EGACA": event-guided adaptive channel attention [40], "SConv": stripped convolution, "TConv": transposed convolution, "Bi-Fusion": bidirectional fusion.

In addition, they split the original event branch into a forward recurrent branch and a backward recurrent branch, which recurrently consume sub-voxels of the forward event voxel $V_{t - \Delta t \to t}$ and the backward event voxel $V_{t + \Delta t \rightarrow t}$, respectively, in a gathered way. In each recurrent iteration, a sub-voxel $V_{sub} \in \mathbb{R}^{2 \times H \times W}$ is fed to the event branch, which encodes the event information for the latent frame. To fuse the features obtained from the forward and backward recurrent branches, the outputs of both directions are fed into a channel concatenation and a $1 \times 1$ convolution at each scale ("Bi-Fusion" in Fig. 5). Then, they are added element-wise to the features of the corresponding scale of the decoder. In addition, to reduce redundancy, they removed the recurrent structure of the decoder section and replaced it with residual blocks. Finally, to make the network learn high-frequency information, the output of the last residual block and the initial features of the blurred image are added element-wise, and the sharp image $\hat{I}_t$ is then obtained through a $3 \times 3$ convolution.

# 4.6.2. Implementation details

Training strategy. They train BGRN on the HighREV training dataset specified by the organizers with a batch size of 4 for 200k iterations on an NVIDIA GeForce RTX 3090 GPU. They crop the input images and event voxels to $256 \times 256$ for training and use horizontal and vertical flips for data augmentation. AdamW [29] with an initial learning rate of $2 \times 10^{-4}$ and a cosine learning rate annealing strategy with $1 \times 10^{-7}$ as the minimum learning rate are adopted for optimization. They use a PSNR loss [39] as supervision.

Ensemble strategy. During testing, they found that images prefixed with "zigzag" showed a large difference in brightness compared to the other, normal images. To adapt to this sudden change in brightness, they select images with the prefix "sternwatz_window", whose scene is similar, from the training set. Then, they double their brightness to fine-tune the pre-trained BGRN model for 5k iterations with an initial learning rate of $2 \times 10^{-5}$. The ensemble strategy is then applied at test time: the abnormally bright images (prefixed with "zigzag") are processed with the fine-tuned model, and the others are processed with the initial pretrained model.
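This per-sequence routing reduces to a filename check at inference time; a minimal sketch, with hypothetical model handles, is shown below.

```python
def pick_model(image_name: str, base_model, bright_model):
    """Route abnormally bright 'zigzag' frames to the brightness-fine-tuned model."""
    return bright_model if image_name.startswith("zigzag") else base_model
```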
# 4.7. DVS-WHU

# 4.7.1. Network Architecture

As shown in Fig. 6, the proposed Dual Channel Cross-modal Mamba (DCCM) architecture comprises three primary components: two Shallow Feature Extraction (SFE) modules; a series of $N$ dual channel blocks (with $N = 20$ in their experimental configuration), each containing two Residual Dense Blocks (RDB) [57] and two Cross Modal Mamba (CMM) [14] blocks; and a Global Feature Fusion (GFF) module. Initially, both the blurred image and the events (represented as 24-bin voxel grids) are processed through the SFE modules for preliminary feature extraction. Subsequently, the dual channel blocks facilitate in-depth feature extraction and cross-modal interaction. Finally, the GFF module synthesizes the ultimate latent sharp image.

![](images/c721fffa4b1c80802496768db4fde67eeb3302413c722054c2d6fddc7e8ce50f.jpg)

![](images/8c4442be526dca917ebfb53309df0e69db477c4ded87f55e59353dfeadb1e786.jpg)
Figure 6. Architecture of DCCM, proposed by Team DVS-WHU.

The core concept of their network is to establish a mutual compensatory relationship between the features derived from event data and those from blurred images through a dual-channel framework. Specifically, while event data are often characterized by significant noise, images typically exhibit lower noise levels. The CMM block is employed to incorporate image features into the event data, thereby mitigating the noise present in the events. Conversely, event data are rich in sharp edge information, and the CMM block also facilitates the integration of event features into blurred images, ultimately contributing to the deblurred result.

# 4.7.2. Implementation Details

The network is created with PyTorch and trained on two NVIDIA GeForce RTX 3090 GPUs for 150 epochs with a ground-truth-guided L1 norm loss. The training process is composed of two phases. During the first phase, they follow the strategy of Cheng et al. [6] and pretrain their DCCM on a mixed dataset including the synthetic REDS dataset [35] and the semi-synthetic HQF dataset [38], with a learning rate fixed at $1 \times 10^{-4}$ for 50 epochs. In the second phase, the network is fine-tuned on the HighREV dataset [40], where the images are randomly cropped into $256 \times 256$ patches with horizontal flipping for data augmentation, and the learning rate linearly decays to $1 \times 10^{-5}$ until the 150th epoch.

# 4.8. PixelRevive

The model they used was the same as EFNet [39]. The key to the improved performance of their model lay in the utilization of additional datasets during training and the adoption of larger image sizes in the final fine-tuning phase. They employed a two-stage training strategy. First, they used an event simulator called V2E [15] to generate events from the REDS dataset, with a timestamp resolution of 0.001 and a DVS exposure duration of 0.001. The remaining parameters were configured identically to those specified in the V2E paper. They obtained over 20,000 pairs of events, blurry images, and sharp images, and trained the model on REDS for 250,000 iterations with a gt_size of 256 and a batch size of 8. When validating on the HighREV validation set while training on the simulated data, they observed a paradoxical divergence: while the training PSNR consistently improved, the validation PSNR exhibited a decline. This counterintuitive phenomenon may stem from distributional discrepancies between the synthetic data and HighREV characteristics across multiple feature dimensions.

Then, they fine-tuned the model on the HighREV training dataset for 200,000 iterations with a gt_size of 512 and a batch size of 8. The TrueCosineAnnealingLR scheduler was employed in both training phases, configured with a period matching the total training iterations and a minimum learning rate of 1e-7. After experiments, they found that a larger gt_size can improve the PSNR by about 0.5 dB. Experiments showed that performance decreases when the gt_size exceeds 512 (tested range: 256-608), making 512 the optimal size. Other strategies are the same as for EFNet.
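For intuition, the sketch below shows the basic contrast-threshold principle behind frame-to-event simulators such as V2E: log-intensity changes that cross a threshold emit events. Real simulators additionally model sensor noise, pixel bandwidth, and sub-frame timing, so this is an illustration rather than a V2E reimplementation.

```python
import numpy as np

def simulate_events(frames: np.ndarray, threshold: float = 0.2):
    """frames: (T, H, W) intensities in (0, 1]; returns (t, y, x, polarity) tuples."""
    log_i = np.log(frames + 1e-6)
    ref = log_i[0].copy()                       # per-pixel reference log intensity
    events = []
    for t in range(1, len(log_i)):
        diff = log_i[t] - ref
        ys, xs = np.nonzero(np.abs(diff) >= threshold)
        for y, x in zip(ys, xs):
            polarity = 1 if diff[y, x] > 0 else -1
            events.append((t, int(y), int(x), polarity))
            ref[y, x] = log_i[t, y, x]          # reset reference after firing
    return events
```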
# 4.9. CHD

As illustrated in Fig. 7, team CHD develops an efficient Event-Image Deblurformer Network (EIDFNet) based on the Restormer architecture [55]. To address the computational bottleneck encountered when restoring high-resolution blurry images using event data, they incorporate key design elements from EFNet [39].

# 4.9.1. Network Architecture

Considering the speed of model training, they still used the official 6-channel voxel grid event representation to achieve a balance between efficiency and precision. They input the blurred image and the event representation, at a consistent spatial resolution, into the network and employ a modified Transformer block to fuse the cross-modal features. Firstly, they modify the transformer block in Restormer [55] into a fusion module that achieves full interaction between different feature channels by setting the number of input and output dims in the GDFN and adding a $1 \times 1$ convolution in the residual connections. Additionally, they build a mutually enhanced fusion encoder based on the Event-Image Cross-Modal Attention Fusion Module (EICA) proposed in EFNet [39]. The enhanced image features are obtained using K and V derived from event embeddings, while Q is sourced from image embeddings. Conversely, the enhanced event features are generated with K and V originating from image embeddings, with Q being drawn from event embeddings.

![](images/45b31251263c662d830970b24c8d4673eaabe0b4c9988af96695c9a7b73ede74.jpg)
Figure 7. The framework of the Event-Image Deblurformer Network (EIDFNet), proposed by Team CHD.

In order to achieve comprehensive integration of event and image features, the enhanced image features and enhanced event features are concatenated along the channel dimension. Subsequently, these concatenated features are fused using a modified Transformer block. Ultimately, each encoder produces enhanced image features, enhanced event features, and fused features. The enhanced event and image features undergo downsampling before being input into the subsequent encoder. The fused features are directly linked to the corresponding decoding features through a skip connection.

# 4.9.2. Training Strategy

They adopt a progressive learning strategy following the settings in Restormer [55] and train the model on an A100 GPU with an L1 loss. The network is trained on smaller image patches in the early epochs and on gradually larger patches in the later training epochs. During the training process, the batch sizes are [4, 3, 2, 2, 1, 1] and the patch sizes are [128, 160, 192, 256, 320, 384], with iteration counts of [92000, 64000, 48000, 36000, 36000, 24000]. They employ the AdamW optimizer with an initial learning rate of 3e-4 that follows a CosineAnnealingRestartCyclicLR decay strategy.
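The symmetric Q/K/V arrangement described above can be sketched with standard multi-head attention. The following is an illustration of the idea, not the actual EICA module; in particular, the flattening of spatial features into tokens is an assumption.

```python
import torch
import torch.nn as nn

class CrossModalAttention(nn.Module):
    """Mutually enhanced cross-attention: Q from one modality, K/V from the other."""
    def __init__(self, dim: int, heads: int = 4):
        super().__init__()
        self.img_from_evt = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.evt_from_img = nn.MultiheadAttention(dim, heads, batch_first=True)

    def forward(self, img_tokens, evt_tokens):
        # img_tokens, evt_tokens: (B, N, dim) flattened spatial features
        img_enh, _ = self.img_from_evt(img_tokens, evt_tokens, evt_tokens)
        evt_enh, _ = self.evt_from_img(evt_tokens, img_tokens, img_tokens)
        return torch.cat([img_enh, evt_enh], dim=-1)  # fused along channels
```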
![](images/b9251cf46272ae3008bd4f011fa63a877d1c023a96ebea5494bbf7f13e3a452f.jpg)
Figure 8. Overview of the proposed pipeline by Team SMU.

# 4.10. SMU

# 4.10.1. Motivation

Inspired by recent successes in cross-knowledge sharing between events and RGB frames [39], hierarchical temporal and frequency modelling [18, 40], and stage-wise fine fusion [20] for the task of event-based RGB deblurring, they propose to modify the base EFNet model [39] such that the modified model serves as a unified framework which (1) iteratively refines the coarser deblurred images through two stages of extensive fine fusion to combat the insufficiencies of the existing decoding techniques, and (2) can optionally be made specifically aware of propagated frequency information in latent representations to locally and globally filter the blur features in the RGB images by leveraging event features in the frequency domain.

In addition, to the best of their knowledge, none of the existing methods for event-based RGB deblurring recognizes the importance of feature tracking in this task, which can be beneficial for robust performance, especially in challenging conditions such as high contrast (i.e., very bright or dark surroundings) and fast motion (i.e., large pixel displacements within an accumulated event volume) scenarios [33]. To address this limitation, they explicitly employ a data-driven feature tracking module in the pipeline, an inline feature tracker block, such that event feature tracks corresponding to different points in the reference RGB frame are incorporated in the learning process, specifically in the initial stages of the unified framework.

# 4.10.2. Network Architecture

As depicted in Fig. 8, they propose three main modifications to the original EFNet: the inline feature tracker module, bidirectional frame fusion, and AdaRevD refinement, backed by the motivation described in Section 4.10.1 and validated through their experiments. To this end, they design the inline feature tracker such that the latent RGB and event features are merged and learned through a flow autoencoder block in combination with a Conv-LSTM block to retrieve the temporal alignment of features. Furthermore, they place the tracker at an initial stage of the pipeline to ensure that the tracker has access to the high-level features of each modality, rather than the deeper low-level features, since high-level features, which are close to the input data, are more likely to contain information on temporal propagation, which is critical for co-aligned feature tracking.

Inspired by [20], they design the first stage of refinement using a bidirectional frame fusion block, specifically targeting the spatiotemporal information flow between adjacent coarse frames, while in the second stage of refinement, they further refine the output from the first refinement stage with the objective of identifying the still-remaining degradation patterns in the RGB space and tackling them using an adaptive patch-exiting reversible decoder module [31]. Optionally, to implement the frequency-based filtering of blur features, they follow the cross-modal frequency (CMF) module proposed by [18], such that latent representations at each level of the first U-Net are passed through CMF modules and concatenated in the decoder levels in a hierarchical fashion to enhance the latent feature representations with frequency-aware characteristics.

# 4.10.3. Implementation Details

They train the models using one NVIDIA 3090 GPU machine in two stages: (1) the primary event-RGB fusion pipeline, including the proposed frequency-aware module, explicit feature tracking, and the first iteration of refinement based on the bidirectional frame fusion block, and (2) the second iteration of refinement based on the AdaRevD framework [31].

Following the baseline implementation [39], they train the models on the HighREV dataset, in both stages, with an initial learning rate of $2 \times 10^{-4}$ for a total of $2 \times 10^{4}$ iterations. The utilized optimizer is AdamW [29] and the learning objective is set to be the PSNR loss [39].
# 4.11. JNU620

![](images/631527531248f11e51e7497f1fac20dcc126570e11b65b294387f6222f8be17d.jpg)
Figure 9. The model framework proposed by Team JNU620.

As shown in Fig. 9, their framework adopts EFNet [39] as the baseline architecture. To enhance frequency-aware feature processing, a selection frequency block (SF Block) [9] is integrated following each decoder. The architecture introduces two key components: 1) a multi-branch dynamic selection frequency (MDSF) module that adaptively decouples feature mappings into distinct frequency components through dynamic convolution operations; 2) a multi-branch compact selection frequency (MCSF) module specifically designed to expand the receptive field for processing degraded blurry images. For data preparation, they implemented multiple augmentation strategies, including horizontal and vertical spatial shifts. The model was trained for 120,000 iterations on an NVIDIA GeForce RTX 3090 GPU with a batch size of 4. The models were optimized by the Adam method with $\beta_{1} = 0.9$ and $\beta_{2} = 0.99$, and the weight decay was set to $10^{-4}$. The initial learning rate was set to $2 \times 10^{-4}$ and gradually decreased following a cosine annealing schedule. In the inference phase, each test image undergoes augmentation through horizontal and vertical flips before being input into the model. The final restored image is generated by averaging all augmented outputs.
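This flip-and-average test-time augmentation is sketched below. The model handle is a hypothetical stand-in, and the inclusion of the combined flip is an assumption of the illustration; the text specifies horizontal and vertical flips.

```python
import torch

@torch.no_grad()
def tta_infer(model, blur, voxel):
    """Run the model on flipped copies, undo each flip, and average."""
    outputs = []
    for dims in (None, [-1], [-2], [-2, -1]):   # identity, H-flip, V-flip, both
        b = torch.flip(blur, dims) if dims else blur
        v = torch.flip(voxel, dims) if dims else voxel
        out = model(b, v)
        outputs.append(torch.flip(out, dims) if dims else out)
    return torch.stack(outputs).mean(dim=0)
```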
# 4.12. colab

Their team proposes an improved method based on EFNet, named DEFNet (Dynamic Enhanced Fusion Network). This method incorporates three key enhancements. First, they introduce a multi-scale dynamic fusion module, which fuses event and image features at multiple spatial resolutions, significantly improving the restoration of fine details in blurred areas [17]. Second, they enhance the original EICA module by integrating a bidirectional attention mechanism, enabling more effective mutual guidance and interaction between image and event features. Third, for processing event data, they adopt a weighted interpolation strategy [40] that models the dynamic weighting of event sequences more accurately, thereby enriching the temporal details provided to the image restoration process.

# 4.12.1. Network

![](images/94f49739a4b00f51de806981019b97f06eafd67affb2ff59cff77e96de1c28d9.jpg)
Figure 10. DEFNet architecture, proposed by Team colab.

Fig. 10 presents the architecture of DEFNet, which is built upon EFNet and incorporates the newly introduced modules: the multi-scale dynamic fusion module and the enhanced EICA module with a bidirectional attention mechanism. These components work collaboratively to optimize the motion deblurring process by improving feature representation and fusion between the image and event data.

During the deblurring process, event streams are used to provide fine-grained temporal variation information that guides the restoration of motion blur in image frames. Specifically, the Symmetric Cumulative Event Representation (SCER) encodes the temporal distribution of events, while the enhanced Event-Image Cross-modal Attention Fusion (EICA) module leverages bidirectional attention to facilitate deeper interaction between modalities. Additionally, the integration of weighted interpolation improves the temporal alignment and accuracy of event feature extraction. Together, these components enable DEFNet to more effectively restore motion-blurred images by enhancing edge sharpness, preserving texture, and capturing motion dynamics with higher fidelity.

# 4.12.2. Implementation Details

They use the AdamW optimizer with an initial learning rate of 2e-4, a weight decay of 1e-4, and betas set to [0.9, 0.99]. To dynamically adjust the learning rate, they use the TrueCosineAnnealingLR scheduler with a maximum iteration count of T_max = 200000 and a minimum learning rate of 1e-7. During training, the batch size was set to 4, and 3 worker threads were used per GPU. The total number of training iterations was set to 40000. The method was trained and validated on the HighREV dataset. The model achieved significant improvements on both the training and validation sets, with PSNR and SSIM used as evaluation metrics during training. Validation was performed every 10,000 iterations, and the model was regularly saved.

# 4.13. CMSL

The Cascade Event Deblurring Model with Event Edge Loss was built based on EFNet [39]. A motion edge loss and a cascade framework were introduced to enhance the performance of EFNet.

The EFNet backbone was adopted and two improvements were proposed. Firstly, the event data were organized and represented as voxels [39]. The two frames of the event voxel closest to the center of the exposure time were then multiplied to produce a motion edge frame. The motion edge frame contains the edges of the moving objects in the current frame, as shown in Fig. 11; Fig. 12 shows the corresponding edges of the ground truth (sharp) image. As shown in Figs. 11 and 12, the motion edge frame contains clear lines that are consistent with the true edges and can serve as guiding information for image deblurring. The edges of the deblurred image output by the module should be similar to the motion edges. Therefore, a motion edge loss is proposed as follows:

$$
\ell_{edge} = \operatorname{mse}\left(edge(\widehat{x}) \cdot m, e\right)
$$

$$
m_{i,j} = \begin{cases} 1 & \text{if } e_{i,j} > \tau \\ 0 & \text{otherwise} \end{cases}
$$

where $\operatorname{mse}(A, B)$ is the mean squared error between each element in matrices A and B, $\widehat{x}$ is the output deblurred image, $e$ is the motion edge frame, $m$ is the motion edge mask, and $\tau$ is the threshold parameter.

Secondly, a cascade framework is proposed in which two EFNets are connected in cascade to further enhance the image deblurring ability. The first EFNet takes the four frames of the event voxel that are relatively remote from the center of the exposure time, while the second EFNet takes the two frames of the event voxel that are relatively close to the center of the exposure time. The two EFNets form a coarse-to-fine paradigm that gradually removes the motion blur.

![](images/90e21bbf3f89174ac0e0fa957469dd6db0f60d186023d4e39a2ba972e14a96e5.jpg)
Figure 11. The visualization of the motion edges.

![](images/0cfd0d4be9e6d4f6281d40f20feff55f33ef42523e6946f552f2758c4155dabf.jpg)
Figure 12. The edges in the ground truth frame.
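A sketch of this masked edge loss follows. The gradient-magnitude edge extractor is an assumption of the illustration, since the report does not specify the form of $edge(\cdot)$.

```python
import torch

def motion_edge_loss(pred, edge_frame, tau: float = 0.1):
    """pred, edge_frame: (..., H, W); masked MSE against the motion edge frame."""
    gy = pred[..., 1:, :] - pred[..., :-1, :]
    gx = pred[..., :, 1:] - pred[..., :, :-1]
    # gradient-magnitude edge map of the deblurred output, cropped to (H-1, W-1)
    edge = torch.sqrt(gy[..., :, :-1] ** 2 + gx[..., :-1, :] ** 2 + 1e-12)
    e = edge_frame[..., :-1, :-1]
    mask = (e > tau).float()                 # m_{i,j} = 1 where e_{i,j} > tau
    return torch.mean((edge * mask - e) ** 2)
```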
# 4.14. KUnet

# 4.14.1. Architecture

Their solution is built upon a custom KUnet backbone tailored for event-based image deblurring. The model employs a dual-encoder strategy that separately processes RGB images and voxelized event data, each through a dedicated encoder branch. At the bottleneck, the features are fused via channel-wise concatenation and passed through a transformer module.

A key novelty in the design is the use of KANLinear layers within the transformer block. These layers, based on spline-interpolated kernels, improve attention expressiveness without adding significant computational overhead. This fusion architecture leverages the temporal sharpness of events together with the spatial-semantic richness of RGB images to produce high-fidelity deblurred outputs.

![](images/e84d3aa1ada2855602975b3faa64b193f7dfa02538dc62e0653e4b2bc050f2da.jpg)
Figure 13. Left: input blurry frame. Right: output of KUnet, with detailed texture.

![](images/d8e8b79c88d3c5152418bdc02984efc800208515ce23ccfbc32ffe188180b135.jpg)

# 4.14.2. Implementation Details

They train the model from scratch on the official NTIRE 2025 HighREV dataset without any external data or pretrained weights. The voxelized events are represented using 6 temporal bins, generating a 6-channel input tensor for the event encoder.

Training was conducted using 2 NVIDIA A100 GPUs with a batch size of 8 and a patch size of $256 \times 256$. They trained the network for 150k iterations using the AdamW optimizer ($\beta_{1} = 0.9$, $\beta_{2} = 0.99$, weight decay = 1e-4) and a CosineAnnealingLR scheduler. Data augmentations included random horizontal flips and rotations.

The loss function includes a PSNR loss weighted at 0.5. Their final checkpoint achieved a peak PSNR of 29.42 in the NTIRE 2025 validation phase.

Inference was performed using a sliding window approach with a maximum minibatch size of 8. They observed an inference time of $\sim 0.15$ seconds per frame on an A100 GPU, and a memory footprint of approximately 16 GB during training.

# Model Complexity:

- Parameters: 11M
- FLOPs: not computed
- GPU memory usage: 16 GB (training)
- Inference time: 0.15 s/frame

# Code and Resources:

- GitHub: https://github.com/Splendor73/NTIRE2025_EventDeblur_challenge_asu
- Pretrained: https://www.dropbox.com/scl/fi/19td2xtbzxed2bg8tc9w0/17_KUnet.zip
- Results: https://www.dropbox.com/scl/fi/yrky29x2mdwt3k8e40yol/Results.zip

# 4.15. Group10

The solution is built upon a custom adaptation of the EFNet deblurring framework [39]. The method strategically harnesses both conventional image data and event-based information to mitigate motion blur effectively. Key components of the approach include:

Dual-Stream Network Architecture: The model consists of parallel convolutional streams. One stream processes the blurry input image, while the other processes event data, which is converted into a voxel grid representation. A cross-modal attention module subsequently fuses the features extracted from both modalities, enhancing the network's ability to recover fine details in dynamic scenes.

Event Data Representation: The raw event data, comprising spatial coordinates, timestamps, and polarity, is transformed into a voxel grid (see the sketch following these components). This process involves temporal normalization and spatial mapping, enabling the network to capture the dynamic nature of motion events with high precision.

Training Strategy: Mixed precision training is used to maximize GPU efficiency and accelerate convergence. Gradient accumulation is employed to effectively simulate a larger batch size, which is critical for stable training on high-resolution data. The training loss is computed using the Mean Squared Error (MSE) criterion, guiding the network to produce high-quality deblurred images.

Data Pipeline: Custom PyTorch Dataset classes handle the loading and preprocessing of both image and event data. The pipeline includes resizing, normalization, and careful synchronization between blurry images and their corresponding event data, ensuring data consistency across modalities.

Performance Evaluation: The evaluation strategy employs widely accepted metrics such as PSNR and SSIM to quantify restoration quality. Test outputs are resized to their original dimensions and saved as lossless PNG images to preserve the fidelity of the results.
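The following is an illustrative voxel-grid conversion along the lines described above: event timestamps are normalized into a fixed number of temporal bins and polarities are accumulated per pixel. The exact binning and normalization used in the submissions may differ.

```python
import numpy as np

def events_to_voxel(events: np.ndarray, bins: int, h: int, w: int) -> np.ndarray:
    """events: (N, 4) array of (x, y, t, polarity in {-1, +1})."""
    voxel = np.zeros((bins, h, w), dtype=np.float32)
    t = events[:, 2]
    t_norm = (t - t.min()) / max(t.max() - t.min(), 1e-9)   # temporal normalization
    b = np.clip((t_norm * bins).astype(int), 0, bins - 1)   # bin index per event
    x = events[:, 0].astype(int)
    y = events[:, 1].astype(int)
    np.add.at(voxel, (b, y, x), events[:, 3])               # accumulate polarity
    return voxel
```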
Additional details include:

Parameter Count: The EnhancedEFNet model consists of convolutional layers, CrossModalAttention blocks, and skip connections, leading to a parameter count in the range of millions.

CrossModalAttention Layers: These layers introduce additional tensor operations and memory usage. No external pre-trained models were directly used in training; the architecture was trained from scratch on the provided dataset.

GPU Memory Usage: Memory usage is influenced by the batch size (a default of 4 per GPU) and the voxel grid representation (6 event bins, which increases the input size).

CrossModalAttention: Inspired by self-attention mechanisms in Transformer models.

Hybrid Loss Function: Combines MSE and L1 loss for better generalization.

CosineAnnealingLR Scheduler: Used to dynamically adjust learning rates during training.

Use of Additional Training Data: Only the NTIRE dataset was used; training was restricted to the HighREV dataset provided by NTIRE, and no additional synthetic or external event-based datasets were incorporated. Potential future enhancements: using real-world event datasets (e.g., DSEC, MVSEC) could improve generalization, and fine-tuning with pre-trained image restoration models (like DeblurGAN) could be explored.

Quantitative Improvements (Metrics & Performance): Achieved a PSNR of 25.93, improved compared to baseline event fusion models, and an SSIM of 0.82, indicating better perceptual quality in the restored images.

Qualitative Improvements (Visual Results & Generalization): The attention-based fusion of events and images leads to sharper edges and better contrast in reconstructed images, and works well in low-light or high-motion-blur scenarios.

Comparison with Baseline Models: Standard CNN-based deblurring struggles with fine-grained event details, but EnhancedEFNet effectively fuses event features to improve deblurring accuracy. CrossModalAttention aids in the spatial alignment of events and images, reducing artifacts.

Failure Cases & Future Improvements: Highly blurred images with saturated event data can still cause artifacts. More robust fusion mechanisms (e.g., transformer-based approaches) could further enhance performance.

# Acknowledgments

This work was partially supported by the Humboldt Foundation and the Ministry of Education and Science of Bulgaria (support for INSAIT, part of the Bulgarian National Roadmap for Research Infrastructure). Shaolin Su was supported by the HORIZON MSCA Postdoctoral Fellowships funded by the European Union (project number 101152858). We thank the NTIRE 2025 sponsors: ByteDance, Meituan, Kuaishou, and University of Würzburg (Computer Vision Lab).

# A. Teams and affiliations

# NTIRE 2025 team

Title: NTIRE 2025 Event-Based Image Deblurring Challenge

Members:

Lei Sun $^{1}$ (leo.sun@zju.edu.cn),
Andrea Alfarano $^{1}$ (andrea.alfarano@insait.ai),
Peiqi Duan $^{2}$ (duanqi0001@pku.edu.cn),
Shaolin Su $^{3}$ (shaolin@cvc.uab.cat),
Kaiwei Wang $^{4}$ (wangkaiwei@zju.edu.cn),
Boxin Shi $^{2}$ (shiboxin@pku.edu.cn),
Radu Timofte $^{5}$ (radu.timofte@uni-wuerzburg.de),
Danda Pani Paudel $^{1}$ (danda.paudel@insait.ai),
Luc Van Gool $^{1}$ (vangool@vision.ee.ethz.ch)

# Affiliations:

1 INSAIT, Sofia University "St. Kliment Ohridski", Bulgaria
2 Peking University, China
3 Computer Vision Center, Spain
4 Zhejiang University, China
5 University of Würzburg, Germany

# IVISLAB

Title: Triple Event-stream Image Deblurring Network

# Members:

Qinglin Liu $^{1}$ (qlliu@hit.edu.cn), Wei Yu $^{2}$, Xiaogian Lv $^{1}$, Lu Yang $^{3}$, Shuigen Wang $^{3}$, Shengping Zhang $^{1}$, Xiangyang Ji $^{2}$

# Affiliations:

1 Harbin Institute of Technology, Weihai
2 Tsinghua University
3 Raytron Technology Co., Ltd.
+ +# MiVideoDeblur + +Title: Event-Based Image Deblurring from Team MiVideoDeblur + +# Members: + +Long Bao1 (baolong@xiaomi.com), Yuqiang Yang1, Jinao Song1, Ziyi Wang1, Shuang Wen1, Heng Sun1 + +# Affiliations: + +1 Xiaomi Inc., China + +# 404NotFound + +Title: Event-Conditioned Dual-Modal Fusion for Motion Deblurring + +# Members: + +Kean Liu1 (rickyliu@mail.ustc.edu.cn), Mingchen Zhong1, Senyan Xu1, Zhijing Sun1, Jiaying Zhu1, Chengjie Ge1, Xingbo Wang1, Yidi Liu1, Xin Lu1, Xueyang Fu1, Zheng-Jun Zha1 + +# Affiliations: + +1 University of Science and Technology of China + +# Give_it_a_try + +Title: Event-Based Image Deblurring from Team Give_it_a_try + +# Members: + +Dawei Fan1 (dawei.fan@partner.samsung.com), Dafeng Zhang1, Yong Yang1 + +# Affiliations: + +1 Samsung Research China-Beijing (SRC-B) + +# BUPTMM + +Title: Weighted Fusion for Event-based Image Deblurring + +# Members: + +Siru Zhang1 (zhangsr@bupt.edu.cn), Qinghua Yang1, Hao Kang1, Huiyuan Fu1, Heng Zhang2, Hongyuan Yu2, Zhijuan Huang + +# Affiliations: + +1 Beijing University of Posts and Telecommunications, Beijing, China +2 Xiaomi Inc., China + +# WEI + +Title: Bi-directional Gathered Recurrent Network for Event-based Image Deblurring + +# Members: + +Shuoyan Wei1 (shuoyan.wei@bjtu.edu.cn), Feng Li2, Runmin Cong3 + +# Affiliations: + +1 Institute of Information Science, Beijing Jiaotong University +2 School of Computer Science and Engineering, Hefei University of Technology +3 School of Control Science and Engineering, Shandong University + +# DVS-WHU + +Title: Dual Channel Cross-modal Mamba for Event-based Motion Deblurring + +# Members: + +Weiqi Luo1 (wikyluo@whu.edu.cn), Mingyun Lin1, Chenxu Jiang1, Hongyi Liu1, Lei Yu2 + +# Affiliations: + +1 School of Electronic Information, Wuhan University +2 School of Artificial Intelligence, Wuhan University + +# PixelRevive + +Title: Event-Based Image Deblurring from Team PixelRevive + +# Members: + +Weilun Li1 (xyj961011@163.com), Jiajun Zhai1, Tingting Lin1 + +# Affiliations: + +1 College of Optical Science and Engineering, Zhejiang University + +# CHD + +Title: Event-Image Deblurformer Network + +# Members: + +Shuang Ma1 (3125508679@qq.com), Sai Zhou2, Zhanwen Liu3, Yang Wang4 + +# Affiliations: + +1 Chang'an University, Xi'an, China + +# SMU + +Title: Explicit Feature Tracking and Iterative Refinement for Enhancing Event-based Image Deblurring + +# Members: + +Eiffel Chong1, Nuwan Bandara1, Thivya Kandappu1 (thivyak@smu.edu.sg), Archan Misra1 + +# Affiliations: + +1 Singapore Management University + +# JNU620 + +Title: Event-Based Image Deblurring from Team JNU620 + +# Members: + +Yihang Chen1 (Ehang@stu.jnu.edu.cn), Zhan Li1, Weijun Yuan1, Wenzhuo Wang1, Boyang Yao1, Zhanglu Chen1 + +# Affiliations: + +1 Department of Computer Science, Jinan University, Guangzhou, China + +# colab + +Title: Dynamic Enhanced Fusion Network for Event-based Image Deblurring + +# Members: + +Yijing Sun1 (syj3508852939@163.com), Tianjiao Wan1, Zijian Gao1, Qisheng Xu1, Kele Xu1 + +# Affiliations: + +1 National University of Defense Technology + +# CMSL + +Title: Cascade Event Deblurring Model With Event Edge Loss + +# Members: + +Yukun Zhang1 (zhangyukun@cmhi.chinamobile.com), Yu He1, Xiaoyan Xie1, Tao Fu1 + +# Affiliations: + +1 China Mobile (Hangzhou)
Information Technology Co., Ltd, Hangzhou, China + +# KUnet + +Title: KUnet + +# Members: + +Yashu Gautamkumar Patel1 (ypatel37@asu.edu), Vihar Ramesh Jain1, Divesh Basina1 + +# Affiliations: + +1 Arizona State University + +# Group10 + +Title: Event-Based Image Deblurring from Team Group10 + +# Members: + +Rishik Ashili1 (rishik67_soe@jnu.ac.in), Manish Kumar Manjhi1, Sourav Kumar1, Prinon Benny1, Himanshu Ghunawat1, B Sri Sairam Gautam1, Anett Varghese1, Abhishek Yadav1 + +# Affiliations: + +1 Jawaharlal Nehru University, New Delhi, India + +# References + +[1] Inigo Alonso and Ana C Murillo. Ev-segnet: Semantic segmentation for event-based cameras. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 0-0, 2019. 1 +[2] Jiaan Chen, Hao Shi, Yaozu Ye, Kailun Yang, Lei Sun, and Kaiwei Wang. Efficient human pose estimation via 3d event point cloud. In 2022 International Conference on 3D Vision (3DV), pages 1-10. IEEE, 2022. 1 +[3] Liangyu Chen, Xiaojie Chu, Xiangyu Zhang, and Jian Sun. Simple baselines for image restoration. In European Conference on Computer Vision, pages 17-33. Springer, 2022. 5 +[4] Zheng Chen, Kai Liu, Jue Gong, Jingkai Wang, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. NTIRE 2025 challenge on image super-resolution (×4): Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[5] Zheng Chen, Jingkai Wang, Kai Liu, Jue Gong, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. NTIRE 2025 challenge on real-world face restoration: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[6] Zhangyi Cheng, Xiang Zhang, Lei Yu, Jianzhuang Liu, Wen Yang, and Gui-Song Xia. Recovering continuous scene dynamics from a single blurry image with events. arXiv preprint arXiv:2304.02695, 2023. 8 +[7] Marcos Conde, Radu Timofte, et al. NTIRE 2025 challenge on raw image restoration and super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[8] Marcos Conde, Radu Timofte, et al. Raw image reconstruction from RGB on smartphones. NTIRE 2025 challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[9] Yuning Cui, Yi Tao, Zhenshan Bing, Wenqi Ren, Xinwei Gao, Xiaochun Cao, Kai Huang, and Alois Knoll. Selective frequency network for image restoration. In International Conference on Learning Representations, 2023. 10 +[10] Egor Ershov, Sergey Korchagin, Alexei Khalin, Artyom Panshin, Arseniy Terekhin, Ekaterina Zaychenkova, Georgiy Lobarev, Vsevolod Plokhotnyuk, Denis Abramov, Elisey Zhdanov, Sofia Dorogova, Yasin Mamedov, Nikola Banic, Georgii Perevozchikov, Radu Timofte, et al. NTIRE 2025 challenge on night photography rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[11] Yuqian Fu, Xingyu Qiu, Bin Ren, Yanwei Fu, Radu Timofte, Nicu Sebe, Ming-Hsuan Yang, Luc Van Gool, et al. NTIRE 2025 challenge on cross-domain few-shot object detection: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2
[12] Guillermo Gallego, Tobi Delbruck, Garrick Orchard, Chiara Bartolozzi, Brian Taba, Andrea Censi, Stefan Leutenegger, Andrew Davison, Jörg Conradt, Kostas Daniilidis, and Davide Scaramuzza. Event-based vision: A survey. IEEE Trans. Pattern Anal. Mach. Intell., 44(1):154-180, 2022. 1 +[13] Shuhao Han, Haotian Fan, Fangyuan Kong, Wenjie Liao, Chunle Guo, Chongyi Li, Radu Timofte, et al. NTIRE 2025 challenge on text to image generation model quality assessment. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[14] Xuanhua He, Ke Cao, Jie Zhang, Keyu Yan, Yingying Wang, Rui Li, Chengjun Xie, Danfeng Hong, and Man Zhou. Pan-mamba: Effective pan-sharpening with state space model. Information Fusion, 115:102779, 2025. 7 +[15] Yuhuang Hu, Shih-Chii Liu, and Tobi Delbruck. v2e: From video frames to realistic dvs events. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1312-1321, 2021. 8 +[16] Varun Jain, Zongwei Wu, Quan Zou, Louis Florentin, Henrik Turbell, Sandeep Siddhartha, Radu Timofte, et al. NTIRE 2025 challenge on video quality enhancement for video conferencing: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[17] J Kim, D K Ghosh, and Y J Jung. Event-based video deblurring based on image and event feature fusion. Expert Systems with Applications, 223:119917, 2023. 10 +[18] Taewoo Kim, Hoonhee Cho, and Kuk-Jin Yoon. Frequency-aware event-based video deblurring for real-world motion blur. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24966-24976, 2024. 9, 10 +[19] Sangmin Lee, Eunpil Park, Angel Canelo, Hyunhee Park, Youngjo Kim, Hyungju Chun, Xin Jin, Chongyi Li, Chun-Le Guo, Radu Timofte, et al. NTIRE 2025 challenge on efficient burst HDR and restoration: Datasets, methods, and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[20] Huan Li, Hailong Shi, and Xingyu Gao. A coarse-to-fine fusion network for event-based image deblurring. In Proceedings of the International Joint Conference on Artificial Intelligence, pages 974-982, 2024. 9, 10 +[21] Xin Li, Yeying Jin, Xin Jin, Zongwei Wu, Bingchen Li, Yufei Wang, Wenhan Yang, Yu Li, Zhibo Chen, Bihan Wen, Robby Tan, Radu Timofte, et al. NTIRE 2025 challenge on day and night raindrop removal for dual-focused images: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[22] Xin Li, Xijun Wang, Bingchen Li, Kun Yuan, Yizhen Shao, Suhang Yao, Ming Sun, Chao Zhou, Radu Timofte, and Zhibo Chen. NTIRE 2025 challenge on short-form UGC video quality assessment and enhancement: KwaiSR dataset and study. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[23] Xin Li, Kun Yuan, Bingchen Li, Fengbin Guan, Yizhen Shao, Zihao Yu, Xijun Wang, Yiting Lu, Wei Luo, Suhang Yao, Ming Sun, Chao Zhou, Zhibo Chen, Radu Timofte, et al. NTIRE 2025 challenge on short-form UGC video quality assessment and enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[24] Jingyun Liang, Jiezhang Cao, Guolei Sun, Kai Zhang, Luc Van Gool, and Radu Timofte. Swinir: Image restoration using swin transformer.
In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1833-1844, 2021. 5 +[25] Jie Liang, Radu Timofte, Qiaosi Yi, Zhengqiang Zhang, Shuaizheng Liu, Lingchen Sun, Rongyuan Wu, Xindong Zhang, Hui Zeng, Lei Zhang, et al. NTIRE 2025 the 2nd restore any image model (RAIM) in the wild challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[26] Kean Liu, Mingchen Zhong, Senyan Xu, Zhijing Sun, Jiaying Zhu, Chengjie Ge, Xin Lu, Xingbo Wang, Xueyang Fu, and Zheng-Jun Zha. Event-conditioned dual-modal fusion for motion deblurring. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 4 +[27] Xiaohong Liu, Xiongkuo Min, Qiang Hu, Xiaoyun Zhang, Jie Guo, et al. NTIRE 2025 XGC quality assessment challenge: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[28] Xiaoning Liu, Zongwei Wu, Florin-Alexandru Vasluianu, Hailong Yan, Bin Ren, Yulun Zhang, Shuhang Gu, Le Zhang, Ce Zhu, Radu Timofte, et al. NTIRE 2025 challenge on low light image enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[29] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2019. 7, 10 +[30] Xingyu Lu, Lei Sun, Diyang Gu, and Kaiwei Wang. Sge: structured light system based on gray code with an event camera. Optics Express, 32(26):46044-46061, 2024. 1 +[31] Xintian Mao, Qingli Li, and Yan Wang. Adarevd: Adaptive patch exiting reversible decoder pushes the limit of image deblurring. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 25681-25690, 2024. 10 +[32] Nico Messikommer, Stamatos Georgoulis, Daniel Gehrig, Stepan Tulyakov, Julius Erbach, Alfredo Bochicchio, Yuanyou Li, and Davide Scaramuzza. Multi-bracket high dynamic range imaging with event cameras. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 547-557, 2022. 1 +[33] Nico Messikommer, Carter Fang, Mathias Gehrig, and Davide Scaramuzza. Data-driven feature tracking for event cameras. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5642-5651, 2023. 9 +[34] Manasi Muglikar, Guillermo Gallego, and Davide Scaramuzza. Esl: Event-based structured light. In 2021 International Conference on 3D Vision (3DV), pages 1165-1174. IEEE, 2021. 1 +[35] Seungjun Nah, Sungyong Baik, Seokil Hong, Gyeongsik Moon, Sanghyun Son, Radu Timofte, and Kyoung Mu Lee. NTIRE 2019 challenge on video deblurring and super-resolution: Dataset and study. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 1996-2005, 2019. 8 +[36] Bin Ren, Hang Guo, Lei Sun, Zongwei Wu, Radu Timofte, Yawei Li, et al. The tenth NTIRE 2025 efficient super-resolution challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[37] Nickolay Safonov, Alexey Bryntsev, Andrey Moskalenko, Dmitry Kulikov, Dmitriy Vatolin, Radu Timofte, et al. NTIRE 2025 challenge on UGC video enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2
[38] Timo Stoffregen, Cedric Scheerlinck, Davide Scaramuzza, Tom Drummond, Nick Barnes, Lindsay Kleeman, and Robert Mahony. Reducing the sim-to-real gap for event cameras. In European Conference on Computer Vision, pages 534-549, 2020. 8 +[39] Lei Sun, Christos Sakaridis, Jingyun Liang, Qi Jiang, Kailun Yang, Peng Sun, Yaozu Ye, Kaiwei Wang, and Luc Van Gool. Event-based fusion for motion deblurring with cross-modal attention. In European Conference on Computer Vision, pages 412-428. Springer, 2022. 1, 3, 4, 6, 7, 8, 9, 10, 11, 12 +[40] Lei Sun, Christos Sakaridis, Jingyun Liang, Peng Sun, Jiezhang Cao, Kai Zhang, Qi Jiang, Kaiwei Wang, and Luc Van Gool. Event-based frame interpolation with ad-hoc deblurring. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18043-18052, 2023. 2, 3, 4, 6, 7, 8, 9, 10 +[41] Lei Sun, Daniel Gehrig, Christos Sakaridis, Mathias Gehrig, Jingyun Liang, Peng Sun, Zhijie Xu, Kaiwei Wang, Luc Van Gool, and Davide Scaramuzza. A unified framework for event-based frame interpolation with ad-hoc deblurring in the wild. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 1, 3 +[42] Lei Sun, Andrea Alfarano, Peiqi Duan, Shaolin Su, Kaiwei Wang, Boxin Shi, Radu Timofte, Danda Pani Paudel, Luc Van Gool, et al. NTIRE 2025 challenge on event-based image deblurring: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[43] Lei Sun, Yuhan Bao, Jiajun Zhai, Jingyun Liang, Yulun Zhang, Kaiwei Wang, Danda Pani Paudel, and Luc Van Gool. Low-light image enhancement using event-based illumination estimation. arXiv preprint arXiv:2504.09379, 2025. 1 +[44] Lei Sun, Hang Guo, Bin Ren, Luc Van Gool, Radu Timofte, Yawei Li, et al. The tenth NTIRE 2025 image denoising challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[45] Zhijing Sun, Xueyang Fu, Longzhuo Huang, Aiping Liu, and Zheng-Jun Zha. Motion aware event representation-driven image deblurring. In European Conference on Computer Vision, pages 418-435. Springer, 2024. 4 +[46] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Caitian Chen, Zongwei Wu, Radu Timofte, et al. NTIRE 2025 image shadow removal challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[47] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Zongwei Wu, Radu Timofte, et al. NTIRE 2025 ambient lighting normalization challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[48] Yingqian Wang, Zhengyu Liang, Fengyuan Zhang, Lvli Tian, Longguang Wang, Juncheng Li, Jungang Yang, Radu Timofte, Yulan Guo, et al. NTIRE 2025 challenge on light field image super-resolution: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[49] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE Transactions on Image Processing, 13(4):600-612, 2004. 5 +[50] Wenming Weng, Yueyi Zhang, and Zhiwei Xiong. Event-based blurry frame interpolation under blind exposure. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1588-1598, 2023. 6
[51] Kangning Yang, Jie Cai, Ling Ouyang, Florin-Alexandru Vasluianu, Radu Timofte, Jiaming Ding, Huiming Sun, Lan Fu, Jinlong Li, Chiu Man Ho, Zibo Meng, et al. NTIRE 2025 challenge on single image reflection removal in the wild: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[52] Wen Yang, Jinjian Wu, Jupo Ma, Leida Li, and Guangming Shi. Motion deblurring via spatial-temporal collaboration of frames and events. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 6531-6539, 2024. 6 +[53] Pierluigi Zama Ramirez, Fabio Tosi, Luigi Di Stefano, Radu Timofte, Alex Costanzino, Matteo Poggi, Samuele Salti, Stefano Mattoccia, et al. NTIRE 2025 challenge on HR depth from images of specular and transparent surfaces. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2 +[54] Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, Ming-Hsuan Yang, and Ling Shao. Learning enriched features for real image restoration and enhancement. In European Conference on Computer Vision, pages 492-511. Springer, 2020. 5 +[55] Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, and Ming-Hsuan Yang. Restormer: Efficient transformer for high-resolution image restoration. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5728-5739, 2022. 4, 5, 8, 9 +[56] Shaobo Zhang, Lei Sun, and Kaiwei Wang. A multi-scale recurrent framework for motion segmentation with event camera. IEEE Access, 11:80105-80114, 2023. 1 +[57] Yulun Zhang, Yapeng Tian, Yu Kong, Bineng Zhong, and Yun Fu. Residual dense network for image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2472-2481, 2018.
7 \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12401/images/047d45c9675c3b93c5f749c35e8ec0364ffc2a4e8ec6d19a9d71dffe864ced92.jpg b/data/2025/2504_12xxx/2504.12401/images/047d45c9675c3b93c5f749c35e8ec0364ffc2a4e8ec6d19a9d71dffe864ced92.jpg new file mode 100644 index 0000000000000000000000000000000000000000..68f13932f98e45c072509cce2557092780479279 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/images/047d45c9675c3b93c5f749c35e8ec0364ffc2a4e8ec6d19a9d71dffe864ced92.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:417c030c55c2bedd5e9052ff40bd3fd605309d81d34c75ada2cbf9275cee9417 +size 3615 diff --git a/data/2025/2504_12xxx/2504.12401/images/0cfd0d4be9e6d4f6281d40f20feff55f33ef42523e6946f552f2758c4155dabf.jpg b/data/2025/2504_12xxx/2504.12401/images/0cfd0d4be9e6d4f6281d40f20feff55f33ef42523e6946f552f2758c4155dabf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0019b0c560b0980bfe2fb27e962d588ea298eb7c --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/images/0cfd0d4be9e6d4f6281d40f20feff55f33ef42523e6946f552f2758c4155dabf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c8606b86bd024830a8e1cd4de25a08946cb49945b805a40107334dc6bf1a4a3 +size 17999 diff --git a/data/2025/2504_12xxx/2504.12401/images/45b31251263c662d830970b24c8d4673eaabe0b4c9988af96695c9a7b73ede74.jpg b/data/2025/2504_12xxx/2504.12401/images/45b31251263c662d830970b24c8d4673eaabe0b4c9988af96695c9a7b73ede74.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0c970493dc2ab71b350df75faec2ada4bb3796dd --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/images/45b31251263c662d830970b24c8d4673eaabe0b4c9988af96695c9a7b73ede74.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7b7d464face5290f8580805d725e68b38c2f6efcc4454f550ec74653e29d70f +size 72309 diff --git a/data/2025/2504_12xxx/2504.12401/images/4a60186a472d957c6deb380f0d390f1eecef3fee54b31e378bd9bbb786363b64.jpg b/data/2025/2504_12xxx/2504.12401/images/4a60186a472d957c6deb380f0d390f1eecef3fee54b31e378bd9bbb786363b64.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89d8cf4e03f4896cfbf8da31b459f22f86cf627d --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/images/4a60186a472d957c6deb380f0d390f1eecef3fee54b31e378bd9bbb786363b64.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a377b481e712456167fec2a5af2c192b1730c4ce3381302c53852130493d6722 +size 4939 diff --git a/data/2025/2504_12xxx/2504.12401/images/53355ba7ec22077eb962d8f0e9c27950cccf02c59a1dd759a83c4d3ea57b20ea.jpg b/data/2025/2504_12xxx/2504.12401/images/53355ba7ec22077eb962d8f0e9c27950cccf02c59a1dd759a83c4d3ea57b20ea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2c4b3cf6fb5d5944ccd3ca526b1899a51f3d29d9 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/images/53355ba7ec22077eb962d8f0e9c27950cccf02c59a1dd759a83c4d3ea57b20ea.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7fa3818b4cabb7d420f471dd52490da9ce9d04aec7c8ef06d49fb849e9836a6 +size 13915 diff --git a/data/2025/2504_12xxx/2504.12401/images/631527531248f11e51e7497f1fac20dcc126570e11b65b294387f6222f8be17d.jpg b/data/2025/2504_12xxx/2504.12401/images/631527531248f11e51e7497f1fac20dcc126570e11b65b294387f6222f8be17d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..87efef1e67e2f5f59b7ea675b726d6d63038c5e5 --- /dev/null +++ 
b/data/2025/2504_12xxx/2504.12401/images/631527531248f11e51e7497f1fac20dcc126570e11b65b294387f6222f8be17d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae4cbb566d78068450c80a384c749e1bd554b5129ea757592d01277945c7840c +size 72576 diff --git a/data/2025/2504_12xxx/2504.12401/images/7627b94ddaed6363c5a60fa46edfbcbbd4028fb146c98f1913397dd84eabe46c.jpg b/data/2025/2504_12xxx/2504.12401/images/7627b94ddaed6363c5a60fa46edfbcbbd4028fb146c98f1913397dd84eabe46c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3c9aa03cd1e8ebdd01538f130428d311b4fbde1e --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/images/7627b94ddaed6363c5a60fa46edfbcbbd4028fb146c98f1913397dd84eabe46c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17b70cff8c533074834617a4b633b0f947a1e17f75db146a54635957bfbd6326 +size 57990 diff --git a/data/2025/2504_12xxx/2504.12401/images/8611a01662b0ce6655ccee7173684d1e4b03e2302ec0f1864c85d316a26da03d.jpg b/data/2025/2504_12xxx/2504.12401/images/8611a01662b0ce6655ccee7173684d1e4b03e2302ec0f1864c85d316a26da03d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ccb1bf69ebf7684de334040b6c9daab5dd201334 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/images/8611a01662b0ce6655ccee7173684d1e4b03e2302ec0f1864c85d316a26da03d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:073eb862a6ee8bc9dd0d9c06884817099f908cf2d01defb3470dc3af0ac8fa79 +size 29159 diff --git a/data/2025/2504_12xxx/2504.12401/images/8c4442be526dca917ebfb53309df0e69db477c4ded87f55e59353dfeadb1e786.jpg b/data/2025/2504_12xxx/2504.12401/images/8c4442be526dca917ebfb53309df0e69db477c4ded87f55e59353dfeadb1e786.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1e211704234adf4d40e133c5478e82cfb6998f25 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/images/8c4442be526dca917ebfb53309df0e69db477c4ded87f55e59353dfeadb1e786.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63d3f918118e7715826523352d1cadac53f127c59f54720e0875c9523da85a1d +size 33406 diff --git a/data/2025/2504_12xxx/2504.12401/images/90e21bbf3f89174ac0e0fa957469dd6db0f60d186023d4e39a2ba972e14a96e5.jpg b/data/2025/2504_12xxx/2504.12401/images/90e21bbf3f89174ac0e0fa957469dd6db0f60d186023d4e39a2ba972e14a96e5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e0e7aa5d116b19336912090c4f519b0612636e75 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/images/90e21bbf3f89174ac0e0fa957469dd6db0f60d186023d4e39a2ba972e14a96e5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be0211193ff5d12db545bc416012117f2e41d921356cd3aef968592c9942f9af +size 18776 diff --git a/data/2025/2504_12xxx/2504.12401/images/94f49739a4b00f51de806981019b97f06eafd67affb2ff59cff77e96de1c28d9.jpg b/data/2025/2504_12xxx/2504.12401/images/94f49739a4b00f51de806981019b97f06eafd67affb2ff59cff77e96de1c28d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..91a536d26e41b7b3875f6eba1ea06862012fad1d --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/images/94f49739a4b00f51de806981019b97f06eafd67affb2ff59cff77e96de1c28d9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c7268c9f9c48c310a61171663bed880250c3748cdbf2e4b4d6d8b0568c658971 +size 31243 diff --git a/data/2025/2504_12xxx/2504.12401/images/9f72a3b5343b2954180c60b89da07dea92ecc4de71c2198cbf209ef89d3a6ac9.jpg 
b/data/2025/2504_12xxx/2504.12401/images/9f72a3b5343b2954180c60b89da07dea92ecc4de71c2198cbf209ef89d3a6ac9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3af5b358cb0c89f430c05e771b62d47063c337c3 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/images/9f72a3b5343b2954180c60b89da07dea92ecc4de71c2198cbf209ef89d3a6ac9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bff9fbd6d119befe68845cd85d206af74a480155b655cbec8fe5ee529231c966 +size 33614 diff --git a/data/2025/2504_12xxx/2504.12401/images/9f8be6c650b99795b0cdb6d83e07801f8311e1e0282f1bf2309fd93b92bb1313.jpg b/data/2025/2504_12xxx/2504.12401/images/9f8be6c650b99795b0cdb6d83e07801f8311e1e0282f1bf2309fd93b92bb1313.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1b176b9459f2fa998377be6782b8b933c313b0ff --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/images/9f8be6c650b99795b0cdb6d83e07801f8311e1e0282f1bf2309fd93b92bb1313.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d660cd5b2eedd31b8c80a050a5b295a5098842f94bfb99514e01481e1863d31 +size 106283 diff --git a/data/2025/2504_12xxx/2504.12401/images/a3e30ae4e1827997e27cf7621f6fffe30946062565c228e2edb467ce551cb086.jpg b/data/2025/2504_12xxx/2504.12401/images/a3e30ae4e1827997e27cf7621f6fffe30946062565c228e2edb467ce551cb086.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e0995e94b46f71fffd711d809ebe06a3b19c326d --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/images/a3e30ae4e1827997e27cf7621f6fffe30946062565c228e2edb467ce551cb086.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e2070eadab0b63b8960ad228b980ea0b1fbe02f53999979bc9acc19ff57d014 +size 78706 diff --git a/data/2025/2504_12xxx/2504.12401/images/b9251cf46272ae3008bd4f011fa63a877d1c023a96ebea5494bbf7f13e3a452f.jpg b/data/2025/2504_12xxx/2504.12401/images/b9251cf46272ae3008bd4f011fa63a877d1c023a96ebea5494bbf7f13e3a452f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6b4b483fb0b5a179525ec737e029242ceb493cee --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/images/b9251cf46272ae3008bd4f011fa63a877d1c023a96ebea5494bbf7f13e3a452f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:485ba9a3a2223dec08a68d69196d613552496fd0fef0c78580a9752c7bc55ae3 +size 36869 diff --git a/data/2025/2504_12xxx/2504.12401/images/c1b557d47e8506e24eb01ecd1ef1e495035f8ebd33e86e59e2ddd403108412fb.jpg b/data/2025/2504_12xxx/2504.12401/images/c1b557d47e8506e24eb01ecd1ef1e495035f8ebd33e86e59e2ddd403108412fb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ccd465e50299f298cab4548bd1f1f15b6f27cd55 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/images/c1b557d47e8506e24eb01ecd1ef1e495035f8ebd33e86e59e2ddd403108412fb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7215b638cddebd5bb94446f067356991b081895d6cc750c067b64055d099e377 +size 30985 diff --git a/data/2025/2504_12xxx/2504.12401/images/c721fffa4b1c80802496768db4fde67eeb3302413c722054c2d6fddc7e8ce50f.jpg b/data/2025/2504_12xxx/2504.12401/images/c721fffa4b1c80802496768db4fde67eeb3302413c722054c2d6fddc7e8ce50f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b5f8c94476208dd2c9354ec1a2233ce93c57e162 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/images/c721fffa4b1c80802496768db4fde67eeb3302413c722054c2d6fddc7e8ce50f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:713864534cf5a8c2ff2a85e7866d078a4939bb133a6e2b702705286176ceeb5f +size 39243 diff --git a/data/2025/2504_12xxx/2504.12401/images/d4312036562aa983d2f712981c79dd05fea8c0c2fd66cd31016a474e139d5a33.jpg b/data/2025/2504_12xxx/2504.12401/images/d4312036562aa983d2f712981c79dd05fea8c0c2fd66cd31016a474e139d5a33.jpg new file mode 100644 index 0000000000000000000000000000000000000000..82a1c4d491e29adc556d280df639ffd7e9f8bae6 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/images/d4312036562aa983d2f712981c79dd05fea8c0c2fd66cd31016a474e139d5a33.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af8e68963728dabd7341ef8b51184682ca8e0d6ad598d2abcd56501fe0828dc0 +size 33043 diff --git a/data/2025/2504_12xxx/2504.12401/images/d8e8b79c88d3c5152418bdc02984efc800208515ce23ccfbc32ffe188180b135.jpg b/data/2025/2504_12xxx/2504.12401/images/d8e8b79c88d3c5152418bdc02984efc800208515ce23ccfbc32ffe188180b135.jpg new file mode 100644 index 0000000000000000000000000000000000000000..572bf8bb78dad3540c8ff1b3c16f68a9856ad941 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/images/d8e8b79c88d3c5152418bdc02984efc800208515ce23ccfbc32ffe188180b135.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c80d5cb1d1de89c203a16867879177ee8be9eeaa1a95df9dcbb6b9d09d361b2 +size 11152 diff --git a/data/2025/2504_12xxx/2504.12401/images/ddc48fce0e5764c0d89a584e63e9518afb9950de5745b935d97fd99b64708c59.jpg b/data/2025/2504_12xxx/2504.12401/images/ddc48fce0e5764c0d89a584e63e9518afb9950de5745b935d97fd99b64708c59.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d78d6bd9b3bb95733be721c1a5501d7989ee0568 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/images/ddc48fce0e5764c0d89a584e63e9518afb9950de5745b935d97fd99b64708c59.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f1e08b29e0bb9509d7fcc0e9022dfc671afbc393c4f4fa42a060d3216f48f1d +size 3172 diff --git a/data/2025/2504_12xxx/2504.12401/images/e84d3aa1ada2855602975b3faa64b193f7dfa02538dc62e0653e4b2bc050f2da.jpg b/data/2025/2504_12xxx/2504.12401/images/e84d3aa1ada2855602975b3faa64b193f7dfa02538dc62e0653e4b2bc050f2da.jpg new file mode 100644 index 0000000000000000000000000000000000000000..256cbc1cbadd997194dec5e1230495ab1e239c88 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/images/e84d3aa1ada2855602975b3faa64b193f7dfa02538dc62e0653e4b2bc050f2da.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1000b8986dda404b7d78b2aba1515e55dddf4bfa216564d011887822226d1274 +size 10049 diff --git a/data/2025/2504_12xxx/2504.12401/layout.json b/data/2025/2504_12xxx/2504.12401/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..d4097a087dab5e09a4300cc61f3792e8f9f0415d --- /dev/null +++ b/data/2025/2504_12xxx/2504.12401/layout.json @@ -0,0 +1,19387 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 56, + 103, + 553, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 103, + 553, + 121 + ], + "spans": [ + { + "bbox": [ + 56, + 103, + 553, + 121 + ], + "type": "text", + "content": "NTIRE 2025 Challenge on Event-Based Image Deblurring: Methods and Results" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 88, + 144, + 134, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 144, + 134, + 156 + ], + "spans": [ + { + "bbox": [ + 88, + 144, + 134, + 156 + ], + "type": "text", + "content": "Lei Sun*" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 78, + 158, + 132, + 170 
+ ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 158, + 132, + 170 + ], + "spans": [ + { + "bbox": [ + 78, + 158, + 132, + 170 + ], + "type": "text", + "content": "Boxin Shi*" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 162, + 144, + 248, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 144, + 248, + 156 + ], + "spans": [ + { + "bbox": [ + 162, + 144, + 248, + 156 + ], + "type": "text", + "content": "Andrea Alfarano*" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 157, + 158, + 230, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 158, + 230, + 170 + ], + "spans": [ + { + "bbox": [ + 157, + 158, + 230, + 170 + ], + "type": "text", + "content": "Radu Timofte*" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 276, + 144, + 335, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 144, + 335, + 157 + ], + "spans": [ + { + "bbox": [ + 276, + 144, + 335, + 157 + ], + "type": "text", + "content": "Peiqi Duan*" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 255, + 158, + 351, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 158, + 351, + 171 + ], + "spans": [ + { + "bbox": [ + 255, + 158, + 351, + 171 + ], + "type": "text", + "content": "Danda Pani Paudel*" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 362, + 144, + 421, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 362, + 144, + 421, + 156 + ], + "spans": [ + { + "bbox": [ + 362, + 144, + 421, + 156 + ], + "type": "text", + "content": "Shaolin Su*" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 376, + 158, + 449, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 376, + 158, + 449, + 171 + ], + "spans": [ + { + "bbox": [ + 376, + 158, + 449, + 171 + ], + "type": "text", + "content": "Luc Van Gool*" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 447, + 144, + 519, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 447, + 144, + 519, + 157 + ], + "spans": [ + { + "bbox": [ + 447, + 144, + 519, + 157 + ], + "type": "text", + "content": "Kaiwei Wang*" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 474, + 158, + 533, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 474, + 158, + 533, + 171 + ], + "spans": [ + { + "bbox": [ + 474, + 158, + 533, + 171 + ], + "type": "text", + "content": "Qinglin Liu" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 66, + 171, + 103, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 171, + 103, + 184 + ], + "spans": [ + { + "bbox": [ + 66, + 171, + 103, + 184 + ], + "type": "text", + "content": "Wei Yu" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 126, + 171, + 187, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 171, + 187, + 185 + ], + "spans": [ + { + "bbox": [ + 126, + 171, + 187, + 185 + ], + "type": "text", + "content": "Xiaogian Lv" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 210, + 172, + 253, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 210, + 172, + 253, + 185 + ], + "spans": [ + { + "bbox": [ + 210, + 172, + 253, + 185 + ], + "type": "text", + "content": "Lu Yang" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 275, + 172, + 347, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 275, + 172, + 347, + 185 + ], + "spans": [ + { + "bbox": [ + 275, + 172, + 347, + 185 + ], + 
"type": "text", + "content": "Shuigen Wang" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 370, + 172, + 457, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 370, + 172, + 457, + 186 + ], + "spans": [ + { + "bbox": [ + 370, + 172, + 457, + 186 + ], + "type": "text", + "content": "Shengping Zhang" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 478, + 172, + 544, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 478, + 172, + 544, + 185 + ], + "spans": [ + { + "bbox": [ + 478, + 172, + 544, + 185 + ], + "type": "text", + "content": "Xiangyang Ji" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 76, + 186, + 125, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 186, + 125, + 198 + ], + "spans": [ + { + "bbox": [ + 76, + 186, + 125, + 198 + ], + "type": "text", + "content": "Long Bao" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 150, + 186, + 187, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 186, + 187, + 199 + ], + "spans": [ + { + "bbox": [ + 150, + 186, + 187, + 199 + ], + "type": "text", + "content": "Yuqiang" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 242, + 186, + 297, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 186, + 297, + 199 + ], + "spans": [ + { + "bbox": [ + 242, + 186, + 297, + 199 + ], + "type": "text", + "content": "Jinao Song" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 320, + 186, + 375, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 186, + 375, + 199 + ], + "spans": [ + { + "bbox": [ + 320, + 186, + 375, + 199 + ], + "type": "text", + "content": "Ziyi Wang" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 400, + 186, + 460, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 400, + 186, + 460, + 199 + ], + "spans": [ + { + "bbox": [ + 400, + 186, + 460, + 199 + ], + "type": "text", + "content": "Shuang Wen" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 483, + 186, + 534, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 483, + 186, + 534, + 199 + ], + "spans": [ + { + "bbox": [ + 483, + 186, + 534, + 199 + ], + "type": "text", + "content": "Heng Sun" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 66, + 200, + 113, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 200, + 113, + 212 + ], + "spans": [ + { + "bbox": [ + 66, + 200, + 113, + 212 + ], + "type": "text", + "content": "Kean Liu" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 135, + 200, + 219, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 200, + 219, + 213 + ], + "spans": [ + { + "bbox": [ + 135, + 200, + 219, + 213 + ], + "type": "text", + "content": "Mingchen Zhong" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 242, + 200, + 297, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 200, + 297, + 213 + ], + "spans": [ + { + "bbox": [ + 242, + 200, + 297, + 213 + ], + "type": "text", + "content": "Senyan Xu" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 319, + 200, + 378, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 200, + 378, + 213 + ], + "spans": [ + { + "bbox": [ + 319, + 200, + 378, + 213 + ], + "type": "text", + "content": "Zhijing Sun" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 399, + 200, + 460, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + 
{ + "bbox": [ + 399, + 200, + 460, + 213 + ], + "spans": [ + { + "bbox": [ + 399, + 200, + 460, + 213 + ], + "type": "text", + "content": "Jiaying Zhu" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 482, + 200, + 533, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 482, + 200, + 533, + 213 + ], + "spans": [ + { + "bbox": [ + 482, + 200, + 533, + 213 + ], + "type": "text", + "content": "Chengjie 6" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 77, + 214, + 145, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 214, + 145, + 228 + ], + "spans": [ + { + "bbox": [ + 77, + 214, + 145, + 228 + ], + "type": "text", + "content": "Xingbo Wang" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 170, + 214, + 211, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 214, + 211, + 227 + ], + "spans": [ + { + "bbox": [ + 170, + 214, + 211, + 227 + ], + "type": "text", + "content": "Yidi Liu" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 296, + 214, + 356, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 296, + 214, + 356, + 227 + ], + "spans": [ + { + "bbox": [ + 296, + 214, + 356, + 227 + ], + "type": "text", + "content": "Xueyang Fu" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 381, + 214, + 455, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 381, + 214, + 455, + 227 + ], + "spans": [ + { + "bbox": [ + 381, + 214, + 455, + 227 + ], + "type": "text", + "content": "Zheng-Jun Zha" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 481, + 214, + 533, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 481, + 214, + 533, + 226 + ], + "spans": [ + { + "bbox": [ + 481, + 214, + 533, + 226 + ], + "type": "text", + "content": "Dawei Fan" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 67, + 228, + 138, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 228, + 138, + 240 + ], + "spans": [ + { + "bbox": [ + 67, + 228, + 138, + 240 + ], + "type": "text", + "content": "Dafeng Zhang" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 162, + 228, + 216, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 228, + 216, + 241 + ], + "spans": [ + { + "bbox": [ + 162, + 228, + 216, + 241 + ], + "type": "text", + "content": "Yong Yang" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 239, + 228, + 293, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 239, + 228, + 293, + 241 + ], + "spans": [ + { + "bbox": [ + 239, + 228, + 293, + 241 + ], + "type": "text", + "content": "Siru Zhang" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 317, + 228, + 388, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 228, + 388, + 241 + ], + "spans": [ + { + "bbox": [ + 317, + 228, + 388, + 241 + ], + "type": "text", + "content": "Qinghua Yang" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 410, + 228, + 462, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 410, + 228, + 462, + 241 + ], + "spans": [ + { + "bbox": [ + 410, + 228, + 462, + 241 + ], + "type": "text", + "content": "Hao Kang" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 484, + 228, + 543, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 484, + 228, + 543, + 241 + ], + "spans": [ + { + "bbox": [ + 484, + 228, + 543, + 241 + ], + "type": "text", + "content": "Huiyuan Fu" + } + ] + } + ], + 
"index": 40 + }, + { + "bbox": [ + 96, + 242, + 157, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 242, + 157, + 255 + ], + "spans": [ + { + "bbox": [ + 96, + 242, + 157, + 255 + ], + "type": "text", + "content": "Heng Zhang" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 185, + 242, + 235, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 185, + 242, + 235, + 255 + ], + "spans": [ + { + "bbox": [ + 185, + 242, + 235, + 255 + ], + "type": "text", + "content": "Hongyuan" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 280, + 242, + 354, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 242, + 354, + 255 + ], + "spans": [ + { + "bbox": [ + 280, + 242, + 354, + 255 + ], + "type": "text", + "content": "Zhijuan Huang" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 382, + 242, + 447, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 382, + 242, + 447, + 255 + ], + "spans": [ + { + "bbox": [ + 382, + 242, + 447, + 255 + ], + "type": "text", + "content": "Shuoyan Wei" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 475, + 242, + 515, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 475, + 242, + 515, + 255 + ], + "spans": [ + { + "bbox": [ + 475, + 242, + 515, + 255 + ], + "type": "text", + "content": "Feng Li" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 72, + 256, + 142, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 256, + 142, + 269 + ], + "spans": [ + { + "bbox": [ + 72, + 256, + 142, + 269 + ], + "type": "text", + "content": "Runmin Cong" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 166, + 256, + 218, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 256, + 218, + 269 + ], + "spans": [ + { + "bbox": [ + 166, + 256, + 218, + 269 + ], + "type": "text", + "content": "Weiqi Luo" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 244, + 256, + 307, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 256, + 307, + 268 + ], + "spans": [ + { + "bbox": [ + 244, + 256, + 307, + 268 + ], + "type": "text", + "content": "Mingyun Lin" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 332, + 256, + 398, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 332, + 256, + 398, + 268 + ], + "spans": [ + { + "bbox": [ + 332, + 256, + 398, + 268 + ], + "type": "text", + "content": "Chenxu Jiang" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 422, + 256, + 537, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 422, + 256, + 537, + 268 + ], + "spans": [ + { + "bbox": [ + 422, + 256, + 537, + 268 + ], + "type": "text", + "content": "Hongyi Liu Lei Yu" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 77, + 270, + 126, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 270, + 126, + 281 + ], + "spans": [ + { + "bbox": [ + 77, + 270, + 126, + 281 + ], + "type": "text", + "content": "Weilun Li" + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 150, + 270, + 206, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 270, + 206, + 282 + ], + "spans": [ + { + "bbox": [ + 150, + 270, + 206, + 282 + ], + "type": "text", + "content": "Jiajun Zhai" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 244, + 270, + 292, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 270, + 292, + 282 + ], + "spans": [ 
+ { + "bbox": [ + 244, + 270, + 292, + 282 + ], + "type": "text", + "content": "ngting Lin" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 317, + 270, + 373, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 270, + 373, + 282 + ], + "spans": [ + { + "bbox": [ + 317, + 270, + 373, + 282 + ], + "type": "text", + "content": "Shuang Ma" + } + ] + } + ], + "index": 54 + }, + { + "bbox": [ + 399, + 270, + 443, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 399, + 270, + 443, + 281 + ], + "spans": [ + { + "bbox": [ + 399, + 270, + 443, + 281 + ], + "type": "text", + "content": "Sai Zhou" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 467, + 270, + 533, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 467, + 270, + 533, + 281 + ], + "spans": [ + { + "bbox": [ + 467, + 270, + 533, + 281 + ], + "type": "text", + "content": "Zhanwen Liu" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 78, + 284, + 135, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 284, + 135, + 297 + ], + "spans": [ + { + "bbox": [ + 78, + 284, + 135, + 297 + ], + "type": "text", + "content": "Yang Wang" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 160, + 284, + 223, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 284, + 223, + 297 + ], + "spans": [ + { + "bbox": [ + 160, + 284, + 223, + 297 + ], + "type": "text", + "content": "Eiffel Chong" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 249, + 284, + 327, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 284, + 327, + 296 + ], + "spans": [ + { + "bbox": [ + 249, + 284, + 327, + 296 + ], + "type": "text", + "content": "Nuwan Bandara" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 351, + 284, + 439, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 351, + 284, + 439, + 297 + ], + "spans": [ + { + "bbox": [ + 351, + 284, + 439, + 297 + ], + "type": "text", + "content": "Thivya Kandappu" + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 465, + 284, + 532, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 465, + 284, + 532, + 296 + ], + "spans": [ + { + "bbox": [ + 465, + 284, + 532, + 296 + ], + "type": "text", + "content": "Archan Misra" + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 96, + 297, + 161, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 297, + 161, + 310 + ], + "spans": [ + { + "bbox": [ + 96, + 297, + 161, + 310 + ], + "type": "text", + "content": "Yihang Chen" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 189, + 297, + 228, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 297, + 228, + 310 + ], + "spans": [ + { + "bbox": [ + 189, + 297, + 228, + 310 + ], + "type": "text", + "content": "Zhan Li" + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 257, + 297, + 320, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 297, + 320, + 310 + ], + "spans": [ + { + "bbox": [ + 257, + 297, + 320, + 310 + ], + "type": "text", + "content": "Weijun Yuan" + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 348, + 297, + 425, + 311 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 348, + 297, + 425, + 311 + ], + "spans": [ + { + "bbox": [ + 348, + 297, + 425, + 311 + ], + "type": "text", + "content": "Wenzhuo Wang" + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 453, + 297, + 514, + 
310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 453, + 297, + 514, + 310 + ], + "spans": [ + { + "bbox": [ + 453, + 297, + 514, + 310 + ], + "type": "text", + "content": "Boyang Yao" + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 73, + 312, + 143, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 312, + 143, + 324 + ], + "spans": [ + { + "bbox": [ + 73, + 312, + 143, + 324 + ], + "type": "text", + "content": "Zhanglu Chen" + } + ] + } + ], + "index": 67 + }, + { + "bbox": [ + 167, + 312, + 220, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 312, + 220, + 324 + ], + "spans": [ + { + "bbox": [ + 167, + 312, + 220, + 324 + ], + "type": "text", + "content": "Yijing Sun" + } + ] + } + ], + "index": 68 + }, + { + "bbox": [ + 245, + 312, + 310, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 245, + 312, + 310, + 324 + ], + "spans": [ + { + "bbox": [ + 245, + 312, + 310, + 324 + ], + "type": "text", + "content": "Tianjiao Wan" + } + ] + } + ], + "index": 69 + }, + { + "bbox": [ + 334, + 312, + 388, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 334, + 312, + 388, + 324 + ], + "spans": [ + { + "bbox": [ + 334, + 312, + 388, + 324 + ], + "type": "text", + "content": "Zijian Gao" + } + ] + } + ], + "index": 70 + }, + { + "bbox": [ + 412, + 312, + 471, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 412, + 312, + 471, + 324 + ], + "spans": [ + { + "bbox": [ + 412, + 312, + 471, + 324 + ], + "type": "text", + "content": "Qisheng Xu" + } + ] + } + ], + "index": 71 + }, + { + "bbox": [ + 495, + 312, + 536, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 495, + 312, + 536, + 323 + ], + "spans": [ + { + "bbox": [ + 495, + 312, + 536, + 323 + ], + "type": "text", + "content": "Kele Xu" + } + ] + } + ], + "index": 72 + }, + { + "bbox": [ + 90, + 326, + 156, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 90, + 326, + 156, + 338 + ], + "spans": [ + { + "bbox": [ + 90, + 326, + 156, + 338 + ], + "type": "text", + "content": "Yukun Zhang" + } + ] + } + ], + "index": 73 + }, + { + "bbox": [ + 182, + 326, + 214, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 182, + 326, + 214, + 338 + ], + "spans": [ + { + "bbox": [ + 182, + 326, + 214, + 338 + ], + "type": "text", + "content": "Yu He" + } + ] + } + ], + "index": 74 + }, + { + "bbox": [ + 241, + 326, + 303, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 326, + 303, + 338 + ], + "spans": [ + { + "bbox": [ + 241, + 326, + 303, + 338 + ], + "type": "text", + "content": "Xiaoyan Xie" + } + ] + } + ], + "index": 75 + }, + { + "bbox": [ + 329, + 326, + 365, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 329, + 326, + 365, + 338 + ], + "spans": [ + { + "bbox": [ + 329, + 326, + 365, + 338 + ], + "type": "text", + "content": "Tao Fu" + } + ] + } + ], + "index": 76 + }, + { + "bbox": [ + 391, + 326, + 521, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 391, + 326, + 521, + 338 + ], + "spans": [ + { + "bbox": [ + 391, + 326, + 521, + 338 + ], + "type": "text", + "content": "Yashu Gautamkumar Patel" + } + ] + } + ], + "index": 77 + }, + { + "bbox": [ + 94, + 339, + 186, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 339, + 186, + 352 + ], + "spans": [ + { + "bbox": [ + 94, + 339, + 186, + 352 + ], + "type": 
"text", + "content": "Vihar Ramesh Jain" + } + ] + } + ], + "index": 78 + }, + { + "bbox": [ + 214, + 339, + 284, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 214, + 339, + 284, + 351 + ], + "spans": [ + { + "bbox": [ + 214, + 339, + 284, + 351 + ], + "type": "text", + "content": "Divesh Basina" + } + ] + } + ], + "index": 79 + }, + { + "bbox": [ + 312, + 339, + 377, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 339, + 377, + 351 + ], + "spans": [ + { + "bbox": [ + 312, + 339, + 377, + 351 + ], + "type": "text", + "content": "Rishik Ashili" + } + ] + } + ], + "index": 80 + }, + { + "bbox": [ + 405, + 339, + 515, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 405, + 339, + 515, + 353 + ], + "spans": [ + { + "bbox": [ + 405, + 339, + 515, + 353 + ], + "type": "text", + "content": "Manish Kumar Manjhi" + } + ] + } + ], + "index": 81 + }, + { + "bbox": [ + 91, + 354, + 164, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 354, + 164, + 365 + ], + "spans": [ + { + "bbox": [ + 91, + 354, + 164, + 365 + ], + "type": "text", + "content": "Sourav Kumar" + } + ] + } + ], + "index": 82 + }, + { + "bbox": [ + 190, + 354, + 258, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 354, + 258, + 366 + ], + "spans": [ + { + "bbox": [ + 190, + 354, + 258, + 366 + ], + "type": "text", + "content": "Prinon Benny" + } + ] + } + ], + "index": 83 + }, + { + "bbox": [ + 285, + 354, + 388, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 285, + 354, + 388, + 365 + ], + "spans": [ + { + "bbox": [ + 285, + 354, + 388, + 365 + ], + "type": "text", + "content": "Himanshu Ghunawat" + } + ] + } + ], + "index": 84 + }, + { + "bbox": [ + 414, + 354, + 518, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 414, + 354, + 518, + 365 + ], + "spans": [ + { + "bbox": [ + 414, + 354, + 518, + 365 + ], + "type": "text", + "content": "B Sri Sairam Gautam" + } + ] + } + ], + "index": 85 + }, + { + "bbox": [ + 189, + 368, + 264, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 368, + 264, + 380 + ], + "spans": [ + { + "bbox": [ + 189, + 368, + 264, + 380 + ], + "type": "text", + "content": "Anett Varghese" + } + ] + } + ], + "index": 86 + }, + { + "bbox": [ + 307, + 368, + 388, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 307, + 368, + 388, + 379 + ], + "spans": [ + { + "bbox": [ + 307, + 368, + 388, + 379 + ], + "type": "text", + "content": "Abhishek Yadav" + } + ] + } + ], + "index": 87 + }, + { + "bbox": [ + 152, + 408, + 199, + 420 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 152, + 408, + 199, + 420 + ], + "spans": [ + { + "bbox": [ + 152, + 408, + 199, + 420 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 88 + }, + { + "bbox": [ + 55, + 436, + 296, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 436, + 296, + 616 + ], + "spans": [ + { + "bbox": [ + 55, + 436, + 296, + 616 + ], + "type": "text", + "content": "This paper presents an overview of NTIRE 2025 the First Challenge on Event-Based Image Deblurring, detailing the proposed methodologies and corresponding results. The primary goal of the challenge is to design an event-based method that achieves high-quality image deblurring, with performance quantitatively assessed using Peak Signal-to-Noise Ratio (PSNR). 
Notably, there are no restrictions on computational complexity or model size. The task focuses on leveraging both events and images as inputs for single-image deblurring. A total of 199 participants registered, among whom 15 teams successfully submitted valid results, offering valuable insights into the current state of event-based image deblurring. We anticipate that this challenge will drive further advancements in event-based vision research." + } + ] + } + ], + "index": 89 + }, + { + "bbox": [ + 315, + 408, + 394, + 420 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 408, + 394, + 420 + ], + "spans": [ + { + "bbox": [ + 315, + 408, + 394, + 420 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 90 + }, + { + "bbox": [ + 313, + 431, + 555, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 431, + 555, + 516 + ], + "spans": [ + { + "bbox": [ + 313, + 431, + 555, + 516 + ], + "type": "text", + "content": "Traditional cameras output frames with relatively long exposure times at a fixed framerate. In contrast, event cameras, a kind of neuromorphic sensor, asynchronously capture pixelwise intensity changes with high temporal resolution [12], and have been applied in various fields such as computational imaging [32, 39-41, 43], human pose estimation [2], depth estimation [30, 34], image segmentation [1, 56], etc." + } + ] + } + ], + "index": 91 + }, + { + "bbox": [ + 313, + 530, + 555, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 530, + 555, + 626 + ], + "spans": [ + { + "bbox": [ + 313, + 530, + 555, + 626 + ], + "type": "text", + "content": "In recent years, significant efforts have been dedicated to event-based image restoration. Among various tasks, event-based image deblurring has gained the most attention, as the high temporal resolution of event cameras provides valuable priors for motion deblurring [39-41]. Notably, these methods operate under the assumption that input images and events are spatially aligned—a condition that applies to all approaches discussed in this paper." + } + ] + } + ], + "index": 92 + }, + { + "bbox": [ + 313, + 629, + 555, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 629, + 555, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 629, + 555, + 714 + ], + "type": "text", + "content": "In conjunction with the NTIRE 2025 Workshop on New Trends in Image Restoration and Enhancement, the Event-Based Image Deblurring Challenge was organized. The objective is to develop a network architecture or solution that effectively integrates events and images to enhance image deblurring performance. We hope that this challenge will serve as a starting point for promoting event-based image" + } + ] + } + ], + "index": 93 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 206, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 206, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 206, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.12401v1 [cs.CV] 16 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 645, + 295, + 684 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 645, + 295, + 684 + ], + "spans": [ + { + "bbox": [ + 55, + 645, + 295, + 684 + ], + "type": "text", + "content": "* L. Sun (lei.sun@insait.ai, INSAIT, Sofia University "St. Kliment Ohridski"), A. Alfarano, P. Duan, S. Su, K. Wang, B. Shi, R. 
Timofte, D. P. Paudel, and L. Van Gool were the challenge organizers, while the other authors participated in the challenge." + } + ] + } + ], + "index": 94 + }, + { + "bbox": [ + 56, + 685, + 238, + 693 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 685, + 238, + 693 + ], + "spans": [ + { + "bbox": [ + 56, + 685, + 238, + 693 + ], + "type": "text", + "content": "Appendix A contains the authors' teams and affiliations." + } + ] + } + ], + "index": 95 + }, + { + "bbox": [ + 56, + 694, + 223, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 694, + 223, + 703 + ], + "spans": [ + { + "bbox": [ + 56, + 694, + 223, + 703 + ], + "type": "text", + "content": "NTIRE 2025 webpage: https://cvlai.net/ntire/2025/." + } + ] + } + ], + "index": 96 + }, + { + "bbox": [ + 56, + 704, + 292, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 704, + 292, + 712 + ], + "spans": [ + { + "bbox": [ + 56, + 704, + 292, + 712 + ], + "type": "text", + "content": "Code: https://github.com/AHupuJR/NTIRE2025_EventDeblur_challenge." + } + ] + } + ], + "index": 97 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 95 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 95 + ], + "type": "text", + "content": "enhancement on a broader stage and contribute to the thriving development of the event-based vision community." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 97, + 295, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 97, + 295, + 312 + ], + "spans": [ + { + "bbox": [ + 56, + 97, + 295, + 312 + ], + "type": "text", + "content": "This challenge is one of the NTIRE 2025 Workshop associated challenges on: ambient lighting normalization [47], reflection removal in the wild [51], shadow removal [46], event-based image deblurring [42], image denoising [44], XGC quality assessment [27], UGC video enhancement [37], night photography rendering [10], image super-resolution (x4) [4], real-world face restoration [5], efficient super-resolution [36], HR depth estimation [53], efficient burst HDR and restoration [19], cross-domain few-shot object detection [11], short-form UGC video quality assessment and enhancement [22, 23], text to image generation model quality assessment [13], day and night raindrop removal for dual-focused images [21], video quality assessment for video conferencing [16], low light image enhancement [28], light field super-resolution [48], restore any image model (RAIM) in the wild [25], raw restoration and super-resolution [7] and raw reconstruction from RGB on smartphones [8]." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 323, + 295, + 352 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 323, + 295, + 352 + ], + "spans": [ + { + "bbox": [ + 55, + 323, + 295, + 352 + ], + "type": "text", + "content": "2. 
NTIRE 2025 Event-Based Image Deblurring Challenge" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 358, + 295, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 358, + 295, + 441 + ], + "spans": [ + { + "bbox": [ + 55, + 358, + 295, + 441 + ], + "type": "text", + "content": "The goals of this challenge include: (1) promoting research in the area of event-based image deblurring, (2) facilitating comparisons between various methods, and (3) providing a platform for academic and industrial participants to engage, discuss, and potentially establish collaborations. This section delves into the specifics of the challenge, including the dataset, challenge phases and evaluation criteria." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 451, + 114, + 462 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 451, + 114, + 462 + ], + "spans": [ + { + "bbox": [ + 55, + 451, + 114, + 462 + ], + "type": "text", + "content": "2.1. Dataset" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 468, + 295, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 468, + 295, + 540 + ], + "spans": [ + { + "bbox": [ + 55, + 468, + 295, + 540 + ], + "type": "text", + "content": "The HighREV dataset [40] is used for both training and evaluation in this challenge. It consists of 1,771 sets of blurry images, corresponding events, and sharp images for training. Additionally, 421 sets are provided as validation data during the development phase, ensuring a comprehensive benchmark for assessing model performance." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 549, + 192, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 549, + 192, + 562 + ], + "spans": [ + { + "bbox": [ + 55, + 549, + 192, + 562 + ], + "type": "text", + "content": "2.2. Tracks and Competition" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 567, + 295, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 567, + 295, + 603 + ], + "spans": [ + { + "bbox": [ + 55, + 567, + 295, + 603 + ], + "type": "text", + "content": "The aim is to obtain a network design capable of producing high-quality results, with performance measured by PSNR for event-based image deblurring." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 610, + 295, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 610, + 295, + 694 + ], + "spans": [ + { + "bbox": [ + 55, + 610, + 295, + 694 + ], + "type": "text", + "content": "Challenge phases Participants were given access to training images from the HighREV dataset. During the validation phase, they could use 421 images from the validation set for model tuning. In the test phase, evaluation was performed on 271 images from the test set. To ensure a fair assessment, the ground-truth images for the test phase remained hidden from participants throughout the challenge." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 322, + 70, + 548, + 263 + ], + "blocks": [ + { + "bbox": [ + 322, + 70, + 548, + 263 + ], + "lines": [ + { + "bbox": [ + 322, + 70, + 548, + 263 + ], + "spans": [ + { + "bbox": [ + 322, + 70, + 548, + 263 + ], + "type": "table", + "html": "
<table>
<tr><th>Team</th><th>Rank</th><th>PSNR (primary)</th><th>SSIM</th></tr>
<tr><td>IVISLAB</td><td>1</td><td>42.79</td><td>0.9196</td></tr>
<tr><td>MiVideoDeblur</td><td>2</td><td>42.70</td><td>0.9281</td></tr>
<tr><td>404NotFound</td><td>3</td><td>42.09</td><td>0.9300</td></tr>
<tr><td>Give_it_a_try</td><td>4</td><td>40.37</td><td>0.9234</td></tr>
<tr><td>BUPTMM</td><td>5</td><td>40.21</td><td>0.9179</td></tr>
<tr><td>WEI</td><td>6</td><td>39.46</td><td>0.9171</td></tr>
<tr><td>DVS-WHU</td><td>7</td><td>39.26</td><td>0.9101</td></tr>
<tr><td>PixelRevive</td><td>8</td><td>39.12</td><td>0.9112</td></tr>
<tr><td>CHD</td><td>9</td><td>38.56</td><td>0.9055</td></tr>
<tr><td>SMU</td><td>10</td><td>38.30</td><td>0.9047</td></tr>
<tr><td>JNU620</td><td>11</td><td>37.63</td><td>0.9019</td></tr>
<tr><td>colab</td><td>12</td><td>36.84</td><td>0.8962</td></tr>
<tr><td>CMSL</td><td>13</td><td>31.81</td><td>0.8900</td></tr>
<tr><td>KUnet</td><td>14</td><td>29.42</td><td>0.8600</td></tr>
<tr><td>Group10</td><td>15</td><td>25.93</td><td>0.8200</td></tr>
</table>
", + "image_path": "7627b94ddaed6363c5a60fa46edfbcbbd4028fb146c98f1913397dd84eabe46c.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 273, + 553, + 316 + ], + "lines": [ + { + "bbox": [ + 313, + 273, + 553, + 316 + ], + "spans": [ + { + "bbox": [ + 313, + 273, + 553, + 316 + ], + "type": "text", + "content": "Table 1. Results of NTIRE 2025 Event-Based Image Deblurring Challenge. PSNR and SSIM scores are measured on the 271 test images from HighREV dataset. Team rankings are based primarily on PSNR." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 335, + 553, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 335, + 553, + 433 + ], + "spans": [ + { + "bbox": [ + 313, + 335, + 553, + 433 + ], + "type": "text", + "content": "Evaluation protocol Since the aim of this challenge is to foster the development of accurate event-based image deblurring networks, PSNR and SSIM on the 271 testing images are used as the quantitative evaluation metrics. A code example for calculating these metrics is available at https://github.com/AHupuJR/NTIRE2025_EventDeblurChallenge. The code of the submitted solutions and the pretrained weights are also available in this repository." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 441, + 421, + 454 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 441, + 421, + 454 + ], + "spans": [ + { + "bbox": [ + 313, + 441, + 421, + 454 + ], + "type": "text", + "content": "3. Challenge Results" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 461, + 553, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 461, + 553, + 534 + ], + "spans": [ + { + "bbox": [ + 313, + 461, + 553, + 534 + ], + "type": "text", + "content": "Table 1 shows the final rankings and test results of the participated teams. The implementation details of each team can be found in Sec.4, while team member information can be found in Appendix A. IVISLAB achieved the first place in terms of PSNR, followed by MiVideoDeblur and 404NotFound as the second and third place, respectively." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 539, + 394, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 539, + 394, + 552 + ], + "spans": [ + { + "bbox": [ + 313, + 539, + 394, + 552 + ], + "type": "text", + "content": "3.1. Participants" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 556, + 553, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 556, + 553, + 581 + ], + "spans": [ + { + "bbox": [ + 313, + 556, + 553, + 581 + ], + "type": "text", + "content": "The challenge attracted 199 registered participants, with 15 teams successfully submitting valid results." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 586, + 476, + 598 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 586, + 476, + 598 + ], + "spans": [ + { + "bbox": [ + 313, + 586, + 476, + 598 + ], + "type": "text", + "content": "3.2. 
Main Ideas and Architectures" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 604, + 553, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 604, + 553, + 652 + ], + "spans": [ + { + "bbox": [ + 313, + 604, + 553, + 652 + ], + "type": "text", + "content": "Throughout the challenge, participants explored various innovative techniques to improve deblurring performance. Below, we summarize some of the key strategies employed by the top-performing teams." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 315, + 654, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 654, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 315, + 654, + 553, + 713 + ], + "type": "text", + "content": "1. Hybrid architectures demonstrated strong performance, with all top-3 teams utilizing a combination of transformers and convolutional networks. This approach leverages global features extracted by transformers alongside local features captured by convolutional" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 70, + 703, + 179, + 712 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 703, + 179, + 712 + ], + "spans": [ + { + "bbox": [ + 70, + 703, + 179, + 712 + ], + "type": "text", + "content": "https://www.cvlai.net/ntire/2025/" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 295, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 295, + 120 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 295, + 120 + ], + "type": "text", + "content": "layers, both of which contribute to effective event-based image deblurring. Besides, both spatial and channel attention mechanisms play a crucial role in enhancing overall performance." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 121, + 295, + 358 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 55, + 121, + 295, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 121, + 295, + 179 + ], + "spans": [ + { + "bbox": [ + 55, + 121, + 295, + 179 + ], + "type": "text", + "content": "2. Pretrained weights matters. The winning team, IVISLAB, leveraged a backbone model initialized with pretrained weights from ImageNet, demonstrating the advantages of transfer learning in event-based image deblurring." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 180, + 295, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 180, + 295, + 227 + ], + "spans": [ + { + "bbox": [ + 55, + 180, + 295, + 227 + ], + "type": "text", + "content": "3. Cross-modal fusion proves beneficial. Several teams adopted EFNet [39] and REFID [40, 41] as a baseline model to fuse features from the event and image branches." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 228, + 295, + 287 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 228, + 295, + 287 + ], + "spans": [ + { + "bbox": [ + 55, + 228, + 295, + 287 + ], + "type": "text", + "content": "4. Effective training strategies. Both the second and third-place teams employed progressive learning techniques during training. 
Additionally, the winning team utilized a large patch size " + }, + { + "bbox": [ + 55, + 228, + 295, + 287 + ], + "type": "inline_equation", + "content": "(512 \\times 512)" + }, + { + "bbox": [ + 55, + 228, + 295, + 287 + ], + "type": "text", + "content": ", which contributed to improved performance." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 288, + 295, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 288, + 295, + 358 + ], + "spans": [ + { + "bbox": [ + 55, + 288, + 295, + 358 + ], + "type": "text", + "content": "5. Incorporating a novel Mamba-based architecture. Integrating features from both image and event modalities is crucial for enhancing the reconstruction quality of event-based deblurring methods. Team DVS-WHU introduced an innovative Mamba-based architecture to achieve more effective fusion." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 365, + 117, + 377 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 365, + 117, + 377 + ], + "spans": [ + { + "bbox": [ + 55, + 365, + 117, + 377 + ], + "type": "text", + "content": "3.3. Fairness" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 54, + 383, + 295, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 383, + 295, + 515 + ], + "spans": [ + { + "bbox": [ + 54, + 383, + 295, + 515 + ], + "type": "text", + "content": "To maintain fairness in the event-based image deblurring challenge, specific rules were implemented, primarily regarding the datasets used for training. Participants were permitted to use external datasets for training. However, incorporating the HighREV validation set, whether sharp or blurry images, was strictly prohibited, as this set served to evaluate the overall performance and generalizability of the models. Additionally, the use of HighREV test blurry images for training was not allowed. On the other hand, employing advanced data augmentation techniques during training was considered an acceptable practice." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 525, + 228, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 525, + 228, + 538 + ], + "spans": [ + { + "bbox": [ + 55, + 525, + 228, + 538 + ], + "type": "text", + "content": "4. Challenge Methods and Teams" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 544, + 124, + 555 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 544, + 124, + 555 + ], + "spans": [ + { + "bbox": [ + 55, + 544, + 124, + 555 + ], + "type": "text", + "content": "4.1. IVISLAB" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 561, + 295, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 561, + 295, + 670 + ], + "spans": [ + { + "bbox": [ + 55, + 561, + 295, + 670 + ], + "type": "text", + "content": "To achieve image deblurring, team IVISLAB introduces the Triple Event-stream Image Deblurring Network (TEIDNet). As depicted in Figure 1, TEIDNet converts consecutive events into event voxels at three temporal scales to perceive motion information from blur images and capture fine edges for reconstructing clear images. Furthermore, TEIDNet integrates Shift Window Attention and Channel-Wise Attention blocks to capture local and global contexts, thereby enhancing deblurring accuracy." 
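TEIDNet's three temporal scales are all instances of the standard b-bin event voxel grid. The sketch below shows one common way to build such a grid; the function name, the (t, x, y, p) event layout, and the bilinear temporal splatting are illustrative assumptions, not the team's released code.

```python
import torch

def events_to_voxel(events: torch.Tensor, num_bins: int,
                    height: int, width: int) -> torch.Tensor:
    """Accumulate an (N, 4) event tensor of (t, x, y, p) rows, p in {-1, +1},
    into a (num_bins, H, W) voxel grid, splitting each event's polarity
    bilinearly between its two nearest temporal bins."""
    voxel = torch.zeros(num_bins, height, width)
    if events.numel() == 0:
        return voxel
    t = events[:, 0]
    x, y = events[:, 1].long(), events[:, 2].long()
    p = events[:, 3]
    # Map timestamps linearly onto the bin axis [0, num_bins - 1].
    span = (t.max() - t.min()).clamp(min=1e-9)
    t = (t - t.min()) / span * (num_bins - 1)
    lo = t.floor().long()
    hi = (lo + 1).clamp(max=num_bins - 1)
    frac = t - lo.float()
    flat = voxel.view(num_bins, -1)
    idx = y * width + x
    flat.index_put_((lo, idx), p * (1.0 - frac), accumulate=True)
    flat.index_put_((hi, idx), p * frac, accumulate=True)
    return voxel

# One call per temporal scale: slice the stream to [t - T, t + T] for
# T in (T_l, T_m, T_s) and voxelize each slice into a b-bin grid.
```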
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 674, + 178, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 674, + 178, + 685 + ], + "spans": [ + { + "bbox": [ + 55, + 674, + 178, + 685 + ], + "type": "text", + "content": "4.1.1. Network Architecture" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 295, + 713 + ], + "type": "text", + "content": "TEIDNet adopts an encoder-decoder architecture to process images and triple-stream event voxels, aiming to estimate" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 318, + 72, + 552, + 173 + ], + "blocks": [ + { + "bbox": [ + 318, + 72, + 552, + 173 + ], + "lines": [ + { + "bbox": [ + 318, + 72, + 552, + 173 + ], + "spans": [ + { + "bbox": [ + 318, + 72, + 552, + 173 + ], + "type": "image", + "image_path": "c1b557d47e8506e24eb01ecd1ef1e495035f8ebd33e86e59e2ddd403108412fb.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 182, + 553, + 202 + ], + "lines": [ + { + "bbox": [ + 313, + 182, + 553, + 202 + ], + "spans": [ + { + "bbox": [ + 313, + 182, + 553, + 202 + ], + "type": "text", + "content": "Figure 1. The model architecture of TEIDNet, proposed by Team IVISLAB." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "spans": [ + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "text", + "content": "the deblurred image. Specifically, when deblurring the image at frame " + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "text", + "content": ", TEIDNet considers that the long-term event stream surrounding frame " + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "text", + "content": " can aid in motion perception. Therefore, it voxelizes the event data from frame " + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "inline_equation", + "content": "t - T_{l}" + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "text", + "content": " to frame " + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "inline_equation", + "content": "t + T_{l}" + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "text", + "content": " into a " + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "text", + "content": "-bin event voxel " + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "inline_equation", + "content": "V_{l,t}" + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "text", + "content": ". 
Simultaneously, since the short-term event stream around frame " + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "text", + "content": " can help reconstruct high-frequency textures, TEIDNet voxelizes the event data from frame " + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "inline_equation", + "content": "t - T_{s}" + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "text", + "content": " to frame " + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "inline_equation", + "content": "t + T_{s}" + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "text", + "content": " into a " + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "text", + "content": "-bin event voxel " + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "inline_equation", + "content": "V_{s,t}" + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "text", + "content": ". Furthermore, to mitigate color artifacts by leveraging higher-resolution motion information near the current frame, TEIDNet voxelizes the event data from frame " + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "inline_equation", + "content": "t - T_{m}" + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "text", + "content": " to frame " + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "inline_equation", + "content": "t + T_{m}" + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "text", + "content": " into a " + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "text", + "content": "-bin event voxel " + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "inline_equation", + "content": "V_{m,t}" + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "text", + "content": ". Subsequently, the event voxels " + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "inline_equation", + "content": "V_{l,t}, V_{s,t}" + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "inline_equation", + "content": "V_{m,t}" + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "text", + "content": ", along with the blur image " + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "inline_equation", + "content": "I_{b}" + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "text", + "content": ", are concatenated and fed into the network. To effectively fuse the features from the image and event voxels, TEIDNet employs convolutional layers to generate fused feature representations. The network then utilizes a dual-branch encoder. The first, a complex branch extracts high-level semantic information from the fused features by leveraging shift window attention to capture local context and channel-wise attention blocks to capture global context. The second, a simple branch utilizes convolutional layers to capture fine-grained details from the fused features. Next, TEIDNet's decoder integrates multiple shift window attention blocks to fuse and upsample the features extracted by the dual-branch encoder. 
Finally, convolutional layers are employed to predict the deblurred image " + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "inline_equation", + "content": "I_{t}" + }, + { + "bbox": [ + 313, + 224, + 555, + 548 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 552, + 403, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 552, + 403, + 563 + ], + "spans": [ + { + "bbox": [ + 313, + 552, + 403, + 563 + ], + "type": "text", + "content": "4.1.2. Loss Function" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 567, + 553, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 567, + 553, + 592 + ], + "spans": [ + { + "bbox": [ + 313, + 567, + 553, + 592 + ], + "type": "text", + "content": "To train TEIDNet, they define a reconstruction loss " + }, + { + "bbox": [ + 313, + 567, + 553, + 592 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_r" + }, + { + "bbox": [ + 313, + 567, + 553, + 592 + ], + "type": "text", + "content": " for the estimated deblurred image " + }, + { + "bbox": [ + 313, + 567, + 553, + 592 + ], + "type": "inline_equation", + "content": "I_{t}" + }, + { + "bbox": [ + 313, + 567, + 553, + 592 + ], + "type": "text", + "content": " as follows:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 358, + 599, + 553, + 613 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 599, + 553, + 613 + ], + "spans": [ + { + "bbox": [ + 358, + 599, + 553, + 613 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {r} = \\lambda_ {1} \\mathrm {L} _ {1} \\left(I _ {t}, I _ {t} ^ {g t}\\right) + \\lambda_ {2} \\mathrm {L} _ {2} \\left(I _ {t}, I _ {t} ^ {g t}\\right) \\tag {1}", + "image_path": "4a60186a472d957c6deb380f0d390f1eecef3fee54b31e378bd9bbb786363b64.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 621, + 553, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 621, + 553, + 669 + ], + "spans": [ + { + "bbox": [ + 313, + 621, + 553, + 669 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 313, + 621, + 553, + 669 + ], + "type": "inline_equation", + "content": "\\lambda_{1}" + }, + { + "bbox": [ + 313, + 621, + 553, + 669 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 621, + 553, + 669 + ], + "type": "inline_equation", + "content": "\\lambda_{2}" + }, + { + "bbox": [ + 313, + 621, + 553, + 669 + ], + "type": "text", + "content": " are coefficients that balance the loss terms. The function " + }, + { + "bbox": [ + 313, + 621, + 553, + 669 + ], + "type": "inline_equation", + "content": "\\mathrm{L}_1(\\cdot ,\\cdot)" + }, + { + "bbox": [ + 313, + 621, + 553, + 669 + ], + "type": "text", + "content": " represents the mean absolute error, while " + }, + { + "bbox": [ + 313, + 621, + 553, + 669 + ], + "type": "inline_equation", + "content": "\\mathrm{L}_2(\\cdot ,\\cdot)" + }, + { + "bbox": [ + 313, + 621, + 553, + 669 + ], + "type": "text", + "content": " denotes the mean squared error. 
The term " + }, + { + "bbox": [ + 313, + 621, + 553, + 669 + ], + "type": "inline_equation", + "content": "I_t^{gt}" + }, + { + "bbox": [ + 313, + 621, + 553, + 669 + ], + "type": "text", + "content": " refers to the ground truth image at frame " + }, + { + "bbox": [ + 313, + 621, + 553, + 669 + ], + "type": "inline_equation", + "content": "t" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 674, + 443, + 686 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 674, + 443, + 686 + ], + "spans": [ + { + "bbox": [ + 313, + 674, + 443, + 686 + ], + "type": "text", + "content": "4.1.3. Implementation Details" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 689, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 555, + 713 + ], + "type": "text", + "content": "TEIDNet is implemented using PyTorch on four Nvidia L20 GPUs. During training, a batch size of 16 is utilized, with" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 70, + 299, + 205 + ], + "blocks": [ + { + "bbox": [ + 58, + 70, + 299, + 205 + ], + "lines": [ + { + "bbox": [ + 58, + 70, + 299, + 205 + ], + "spans": [ + { + "bbox": [ + 58, + 70, + 299, + 205 + ], + "type": "image", + "image_path": "8611a01662b0ce6655ccee7173684d1e4b03e2302ec0f1864c85d316a26da03d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 214, + 295, + 236 + ], + "lines": [ + { + "bbox": [ + 55, + 214, + 295, + 236 + ], + "spans": [ + { + "bbox": [ + 55, + 214, + 295, + 236 + ], + "type": "text", + "content": "Figure 2. The framework of DASTF-Net, proposed by Team MiVideoDeblur." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 54, + 258, + 295, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 258, + 295, + 426 + ], + "spans": [ + { + "bbox": [ + 54, + 258, + 295, + 426 + ], + "type": "text", + "content": "input data dimensions of " + }, + { + "bbox": [ + 54, + 258, + 295, + 426 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 54, + 258, + 295, + 426 + ], + "type": "text", + "content": " pixels. The network weights are optimized over 1000 epochs using the AdamW optimizer, with an initial learning rate set to " + }, + { + "bbox": [ + 54, + 258, + 295, + 426 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-5}" + }, + { + "bbox": [ + 54, + 258, + 295, + 426 + ], + "type": "text", + "content": ". A cosine annealing scheduler is employed to decay the learning rate progressively. In addition, they take the checkpoint with good performance and perform a second finetune. To mitigate overfitting, data augmentation techniques such as random flipping and rotation are applied. They also initialize the backbone network parameters using weights pretrained on ImageNet. 
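Eq. (1) maps directly onto a few lines of PyTorch. A minimal sketch, with the weights defaulting to the lambda_1 = lambda_2 = 1 setting reported in the implementation details:

```python
import torch
import torch.nn.functional as F

def reconstruction_loss(pred: torch.Tensor, target: torch.Tensor,
                        lambda1: float = 1.0, lambda2: float = 1.0) -> torch.Tensor:
    """Eq. (1): L_r = lambda1 * L1(I_t, I_t^gt) + lambda2 * L2(I_t, I_t^gt)."""
    return lambda1 * F.l1_loss(pred, target) + lambda2 * F.mse_loss(pred, target)
```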
The specific coefficients and parameters are defined as follows: number of bins " + }, + { + "bbox": [ + 54, + 258, + 295, + 426 + ], + "type": "inline_equation", + "content": "b = 7" + }, + { + "bbox": [ + 54, + 258, + 295, + 426 + ], + "type": "text", + "content": ", long-term temporal window " + }, + { + "bbox": [ + 54, + 258, + 295, + 426 + ], + "type": "inline_equation", + "content": "T_{l} = 5" + }, + { + "bbox": [ + 54, + 258, + 295, + 426 + ], + "type": "text", + "content": ", medium-term temporal window " + }, + { + "bbox": [ + 54, + 258, + 295, + 426 + ], + "type": "inline_equation", + "content": "T_{m} = 1" + }, + { + "bbox": [ + 54, + 258, + 295, + 426 + ], + "type": "text", + "content": ", short-term temporal window " + }, + { + "bbox": [ + 54, + 258, + 295, + 426 + ], + "type": "inline_equation", + "content": "T_{s} = 0" + }, + { + "bbox": [ + 54, + 258, + 295, + 426 + ], + "type": "text", + "content": ", and loss function weights " + }, + { + "bbox": [ + 54, + 258, + 295, + 426 + ], + "type": "inline_equation", + "content": "\\lambda_{1} = 1" + }, + { + "bbox": [ + 54, + 258, + 295, + 426 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 54, + 258, + 295, + 426 + ], + "type": "inline_equation", + "content": "\\lambda_{2} = 1" + }, + { + "bbox": [ + 54, + 258, + 295, + 426 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 433, + 151, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 433, + 151, + 445 + ], + "spans": [ + { + "bbox": [ + 55, + 433, + 151, + 445 + ], + "type": "text", + "content": "4.2. MiVideoDeblur" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 450, + 295, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 450, + 295, + 629 + ], + "spans": [ + { + "bbox": [ + 55, + 450, + 295, + 629 + ], + "type": "text", + "content": "Introduction. As illustrated in Fig. 2, their team proposed the Dual Attention Spatio-Temporal Fusion Network(DASTF-Net). Motivated by EFNet [39], their model employs a two-stage encoder-decoder architecture. Initially, two encoders separately extract multi-scale features from both the image and event data. Based on the EGACA module [40] and the FAF module [45], they have designed the Temporal Fusion Residual Block (TFRB) and Multi-Scale Cross-Attention Fusion Block (MSCAFB), which perform feature fusion in the temporal and spatial dimensions, respectively. By incorporating a dual-attention mechanism, these modules effectively enhance the model's performance. Following feature fusion, the fused features are fed into a Restormer [55], which further leverages the feature information to improve the model's performance." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 630, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 630, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 630, + 295, + 713 + ], + "type": "text", + "content": "Training strategy. They employed a four-stage training strategy. In the first stage, the network was trained for 160k iterations using the PSNRLoss function. AdamW Optimizer was used, with an initial learning rate of 2e-4 and a cosine annealing learning rate schedule for updates. 
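A "PSNRLoss" of this kind is usually implemented as the negative PSNR, which for images in [0, 1] reduces to a log-MSE term; this formulation is an assumption about the convention, not the team's exact code:

```python
import torch

def psnr_loss(pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    """Negative PSNR for images scaled to [0, 1]; minimizing it maximizes
    PSNR, since PSNR = 10 * log10(1 / MSE)."""
    mse = torch.mean((pred - target) ** 2)
    return 10.0 * torch.log10(mse + 1e-12)
```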
Subsequently, in the second stage, data augmentation techniques were introduced, which included adding random Gaussian" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 156 + ], + "type": "text", + "content": "noise and applying random scaling to the input data. Building upon the model from the first stage, the training continued for 80k iterations with an initial learning rate of 1e-4. For the third and fourth stages, the patch size was progressively increased from 256 to 320 and then to 480. The network was trained for 40k iterations in the third stage and 45k iterations in the fourth stage." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 166, + 400, + 178 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 166, + 400, + 178 + ], + "spans": [ + { + "bbox": [ + 313, + 166, + 400, + 178 + ], + "type": "text", + "content": "4.3. 404NotFound" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 184, + 555, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 184, + 555, + 376 + ], + "spans": [ + { + "bbox": [ + 313, + 184, + 555, + 376 + ], + "type": "text", + "content": "Their team proposes EV-Deblurformer[26], a framework consisting of two complementary models designed to fully leverage the temporal dynamics of video sequences and the rich texture details present in single images. The framework includes two distinct components: Video-SFHformer, developed for video-based deblurring, and EFSformer, tailored for single-image deblurring. In Video-SFHformer, they introduce STFBlock to enhance the model's capacity for long-range temporal modeling. In EFSformer, they incorporate STEFusionBlock, which fuses event features from the frequency domain to improve spatial detail restoration. To achieve optimal performance, as shown in Section 4.3.3, a sequence-level ensemble strategy is employed to merge the outputs of both models. A progressive training scheme is also adopted to enhance robustness and effectiveness." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 384, + 411, + 396 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 384, + 411, + 396 + ], + "spans": [ + { + "bbox": [ + 313, + 384, + 411, + 396 + ], + "type": "text", + "content": "4.3.1. Overall Pipeline" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 400, + 554, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 400, + 554, + 496 + ], + "spans": [ + { + "bbox": [ + 313, + 400, + 554, + 496 + ], + "type": "text", + "content": "Figure 3 illustrates the overall architecture of their proposed method, EV-Deblurformer. This approach, built upon the two models: Video SFHformer and EFSformer, fully exploits the rich temporal dynamics and sharp edge information provided by event data. For the video deblurring model, they propose the Video-SFHformer based on SFHformer. For the single-image motion deblurring model, they propose the EFSformer built on EFNet[39]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 505, + 443, + 517 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 505, + 443, + 517 + ], + "spans": [ + { + "bbox": [ + 313, + 505, + 443, + 517 + ], + "type": "text", + "content": "4.3.2. 
Implementation Details" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 521, + 554, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 521, + 554, + 592 + ], + "spans": [ + { + "bbox": [ + 313, + 521, + 554, + 592 + ], + "type": "text", + "content": "They implement their proposed network via the PyTorch 2.1.2 platform. Adam optimizer with parameters " + }, + { + "bbox": [ + 313, + 521, + 554, + 592 + ], + "type": "inline_equation", + "content": "\\beta_{1} = 0.9" + }, + { + "bbox": [ + 313, + 521, + 554, + 592 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 521, + 554, + 592 + ], + "type": "inline_equation", + "content": "\\beta_{2} = 0.999" + }, + { + "bbox": [ + 313, + 521, + 554, + 592 + ], + "type": "text", + "content": " is adopted to optimize their network. Motivated by [55] they introduce the progressive training strategy. The training phase of their network could be divided into two stages:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "text", + "content": "(1) Initial training of EV-Deblurformer. They use a progressive training strategy at first. For the video-based motion deblurring model, they start training with patch size " + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "inline_equation", + "content": "152 \\times 152" + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "text", + "content": " with batch size of 16 for 250K iterations. The patch size and batch size pairs are updated to " + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "inline_equation", + "content": "[(192^2, 12), (256^2, 8), (304^2, 8)]" + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "text", + "content": " at iterations [250K, 200K, 150K]. The initial learning rate is " + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-4}" + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "text", + "content": " and remains unchanged when patch size is 192. Later, the learning rate is set to " + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "inline_equation", + "content": "7 \\times 10^{-5}" + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "text", + "content": " for patch and batch size pairs of " + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "inline_equation", + "content": "(256^2, 8)" + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "inline_equation", + "content": "(304^2, 8)" + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "text", + "content": ", respectively. 
They employ a" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 82, + 68, + 279, + 223 + ], + "blocks": [ + { + "bbox": [ + 82, + 68, + 279, + 223 + ], + "lines": [ + { + "bbox": [ + 82, + 68, + 279, + 223 + ], + "spans": [ + { + "bbox": [ + 82, + 68, + 279, + 223 + ], + "type": "image", + "image_path": "9f72a3b5343b2954180c60b89da07dea92ecc4de71c2198cbf209ef89d3a6ac9.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 282, + 68, + 527, + 223 + ], + "blocks": [ + { + "bbox": [ + 282, + 68, + 527, + 223 + ], + "lines": [ + { + "bbox": [ + 282, + 68, + 527, + 223 + ], + "spans": [ + { + "bbox": [ + 282, + 68, + 527, + 223 + ], + "type": "image", + "image_path": "d4312036562aa983d2f712981c79dd05fea8c0c2fd66cd31016a474e139d5a33.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 82, + 227, + 526, + 380 + ], + "blocks": [ + { + "bbox": [ + 82, + 227, + 526, + 380 + ], + "lines": [ + { + "bbox": [ + 82, + 227, + 526, + 380 + ], + "spans": [ + { + "bbox": [ + 82, + 227, + 526, + 380 + ], + "type": "image", + "image_path": "a3e30ae4e1827997e27cf7621f6fffe30946062565c228e2edb467ce551cb086.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 388, + 555, + 401 + ], + "lines": [ + { + "bbox": [ + 55, + 388, + 555, + 401 + ], + "spans": [ + { + "bbox": [ + 55, + 388, + 555, + 401 + ], + "type": "text", + "content": "Figure 3. The architecture diagram of EV-Deblurformer, proposed by Team 404NotFound, is designed for event-guided motion deblurring." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 421, + 298, + 715 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 54, + 421, + 297, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 421, + 297, + 589 + ], + "spans": [ + { + "bbox": [ + 54, + 421, + 297, + 589 + ], + "type": "text", + "content": "cosine annealing learning rate decay strategy, gradually reducing the learning rate. For the single-image-based motion deblurring model, They begin training with a patch size of " + }, + { + "bbox": [ + 54, + 421, + 297, + 589 + ], + "type": "inline_equation", + "content": "192 \\times 192" + }, + { + "bbox": [ + 54, + 421, + 297, + 589 + ], + "type": "text", + "content": " and a batch size of 12 for 250K iterations. During training, patch size and batch size pairs are progressively updated to " + }, + { + "bbox": [ + 54, + 421, + 297, + 589 + ], + "type": "inline_equation", + "content": "(256^{2}, 10)" + }, + { + "bbox": [ + 54, + 421, + 297, + 589 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 54, + 421, + 297, + 589 + ], + "type": "inline_equation", + "content": "(288^{2}, 8)" + }, + { + "bbox": [ + 54, + 421, + 297, + 589 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 54, + 421, + 297, + 589 + ], + "type": "inline_equation", + "content": "(320^{2}, 8)" + }, + { + "bbox": [ + 54, + 421, + 297, + 589 + ], + "type": "text", + "content": " at 36K, 24K, and 24K iterations, respectively. 
The initial learning rate is set to " + }, + { + "bbox": [ + 54, + 421, + 297, + 589 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-4}" + }, + { + "bbox": [ + 54, + 421, + 297, + 589 + ], + "type": "text", + "content": ", and later adjusted to " + }, + { + "bbox": [ + 54, + 421, + 297, + 589 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 54, + 421, + 297, + 589 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 54, + 421, + 297, + 589 + ], + "type": "inline_equation", + "content": "7 \\times 10^{-5}" + }, + { + "bbox": [ + 54, + 421, + 297, + 589 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 54, + 421, + 297, + 589 + ], + "type": "inline_equation", + "content": "5 \\times 10^{-5}" + }, + { + "bbox": [ + 54, + 421, + 297, + 589 + ], + "type": "text", + "content": " corresponding to the updated patch and batch size configurations. A cosine annealing schedule is employed to gradually decay the learning rate throughout the training process. The first stage is performed on the NVIDIA RTX 4090 GPU. They obtain the best model at this stage as the initialization of the second stage." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 594, + 298, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 594, + 298, + 715 + ], + "spans": [ + { + "bbox": [ + 54, + 594, + 298, + 715 + ], + "type": "text", + "content": "(2) Fine-tuning EV-Deblurformer. For the video-based motion deblurring model, they start training with a patch size of " + }, + { + "bbox": [ + 54, + 594, + 298, + 715 + ], + "type": "inline_equation", + "content": "320 \\times 320" + }, + { + "bbox": [ + 54, + 594, + 298, + 715 + ], + "type": "text", + "content": " and a batch size of 4 for 150K iterations. The initial learning rate is set to " + }, + { + "bbox": [ + 54, + 594, + 298, + 715 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-5}" + }, + { + "bbox": [ + 54, + 594, + 298, + 715 + ], + "type": "text", + "content": " and is adjusted to " + }, + { + "bbox": [ + 54, + 594, + 298, + 715 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-7}" + }, + { + "bbox": [ + 54, + 594, + 298, + 715 + ], + "type": "text", + "content": " using a cosine annealing schedule, over a total of 150K iterations. They use the entire training data from the challenge without applying any data augmentation techniques. The exponential moving average (EMA) is employed for the dynamic adjustment of the model parameters. For the single-image-based motion deblurring model, they" + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 421, + 555, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 421, + 555, + 456 + ], + "spans": [ + { + "bbox": [ + 313, + 421, + 555, + 456 + ], + "type": "text", + "content": "adopt the same training strategy as used in the video-based motion deblurring model. The second training stage is conducted on an NVIDIA RTX 4090 GPU." 
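The progressive schedule described above amounts to re-running the same training loop over a list of (patch size, batch size, learning rate, iterations) stages. A schematic sketch using the single-image stage values quoted above; `model`, `make_loader`, and `loss_fn` are hypothetical placeholders, not the team's code:

```python
import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import CosineAnnealingLR

# (patch, batch, initial lr, iterations) per stage, as described above.
STAGES = [(192, 12, 5e-4, 250_000), (256, 10, 1e-4, 36_000),
          (288, 8, 7e-5, 24_000), (320, 8, 5e-5, 24_000)]

def progressive_train(model, make_loader, loss_fn, device="cuda"):
    model.to(device)
    for patch, batch, lr, iters in STAGES:
        loader = make_loader(patch_size=patch, batch_size=batch)
        opt = Adam(model.parameters(), lr=lr, betas=(0.9, 0.999))
        sched = CosineAnnealingLR(opt, T_max=iters)  # decay within the stage
        step = 0
        while step < iters:
            for blur, events, sharp in loader:
                pred = model(blur.to(device), events.to(device))
                loss = loss_fn(pred, sharp.to(device))
                opt.zero_grad()
                loss.backward()
                opt.step()
                sched.step()
                step += 1
                if step >= iters:
                    break
```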
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 458, + 556, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 458, + 556, + 542 + ], + "spans": [ + { + "bbox": [ + 313, + 458, + 556, + 542 + ], + "type": "text", + "content": "(3) Evaluation Metrics They utilize two widely adopted reference-based evaluation metrics—Peak Signal-to-Noise Ratio (PSNR) and Structural Similarity Index Measure (SSIM)[49]—to evaluate the effectiveness of their method, following prior works[3, 24, 54, 55]. Higher PSNR and SSIM values generally reflect better performance in image restoration tasks." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 552, + 430, + 565 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 552, + 430, + 565 + ], + "spans": [ + { + "bbox": [ + 313, + 552, + 430, + 565 + ], + "type": "text", + "content": "4.3.3. Ensemble Strategies" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 568, + 555, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 568, + 555, + 629 + ], + "spans": [ + { + "bbox": [ + 313, + 568, + 555, + 629 + ], + "type": "text", + "content": "Ensemble learning has been proven to be an effective technique in image restoration. Its most basic application involves integrating the outputs of multiple models and applying a fusion strategy to achieve results with better generalization and greater stability in restoration quality." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 630, + 556, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 630, + 556, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 630, + 556, + 713 + ], + "type": "text", + "content": "The HighREV-test dataset consists of four sequences. Among them, one is an outdoor scene, which differs markedly from the other three in terms of object diversity, texture richness, and color composition. Based on this observation, they explore a sequence-level ensemble strategy that selectively exchanges outputs between Video-SFHformer and EFSformer." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 67, + 70, + 284, + 125 + ], + "blocks": [ + { + "bbox": [ + 67, + 70, + 284, + 125 + ], + "lines": [ + { + "bbox": [ + 67, + 70, + 284, + 125 + ], + "spans": [ + { + "bbox": [ + 67, + 70, + 284, + 125 + ], + "type": "image", + "image_path": "53355ba7ec22077eb962d8f0e9c27950cccf02c59a1dd759a83c4d3ea57b20ea.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 133, + 295, + 166 + ], + "lines": [ + { + "bbox": [ + 55, + 133, + 295, + 166 + ], + "spans": [ + { + "bbox": [ + 55, + 133, + 295, + 166 + ], + "type": "text", + "content": "Figure 4. An overview of the method proposed by Team BUPTMM: They set the weights for the fusion, with " + }, + { + "bbox": [ + 55, + 133, + 295, + 166 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 55, + 133, + 295, + 166 + ], + "type": "text", + "content": " set to 0.6 and " + }, + { + "bbox": [ + 55, + 133, + 295, + 166 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 55, + 133, + 295, + 166 + ], + "type": "text", + "content": " to 0.4." 
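The alpha/beta weighting in Figure 4's caption reads as a plain convex combination of the two networks' predictions. A minimal sketch under that reading, assuming both outputs are aligned tensors in [0, 1]:

```python
import torch

def fuse_predictions(pred_a: torch.Tensor, pred_b: torch.Tensor,
                     alpha: float = 0.6, beta: float = 0.4) -> torch.Tensor:
    """Pixelwise weighted fusion of two deblurred outputs (alpha + beta = 1)."""
    return (alpha * pred_a + beta * pred_b).clamp(0.0, 1.0)
```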
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 192, + 296, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 192, + 296, + 277 + ], + "spans": [ + { + "bbox": [ + 55, + 192, + 296, + 277 + ], + "type": "text", + "content": "Specifically, they start with the best-performing Video-SFHformer model and replace the output of the outdoor sequence in the HighREV-test set with the corresponding result generated by EFSformer. The results in Table 1 show that their approach yields the best performance, achieving the highest SSIM score and ranking third overall in the NTIRE Event-Based Image Deblurring Challenge." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 292, + 138, + 304 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 292, + 138, + 304 + ], + "spans": [ + { + "bbox": [ + 55, + 292, + 138, + 304 + ], + "type": "text", + "content": "4.4. Give_it_a_try" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 312, + 153, + 323 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 312, + 153, + 323 + ], + "spans": [ + { + "bbox": [ + 55, + 312, + 153, + 323 + ], + "type": "text", + "content": "4.4.1. General method" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 330, + 296, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 330, + 296, + 402 + ], + "spans": [ + { + "bbox": [ + 55, + 330, + 296, + 402 + ], + "type": "text", + "content": "This submission is mainly based on the public code of another team. Models used in this submission are EFNet att track fusion and EFNet att track fusion new, which can be found at archs or archs/tested. They change the training strategy, finetune the models, and combine the two best models to push the limits of scoring." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 409, + 296, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 409, + 296, + 468 + ], + "spans": [ + { + "bbox": [ + 56, + 409, + 296, + 468 + ], + "type": "text", + "content": "- How event modality is utilized in the deblurring process: They used the given SCER format event voxels in training, validating and testing. The usage is the same as in the original EFNet [39], since the new networks retain the encoder module of the baseline." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 483, + 182, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 483, + 182, + 495 + ], + "spans": [ + { + "bbox": [ + 55, + 483, + 182, + 495 + ], + "type": "text", + "content": "4.4.2. 
Implementation details" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 498, + 105, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 498, + 105, + 510 + ], + "spans": [ + { + "bbox": [ + 56, + 498, + 105, + 510 + ], + "type": "text", + "content": "- Training:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 63, + 510, + 295, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 510, + 295, + 665 + ], + "spans": [ + { + "bbox": [ + 63, + 510, + 295, + 665 + ], + "type": "text", + "content": "In the first stage of training, all models are trained for " + }, + { + "bbox": [ + 63, + 510, + 295, + 665 + ], + "type": "inline_equation", + "content": "2 \\times 10^{5}" + }, + { + "bbox": [ + 63, + 510, + 295, + 665 + ], + "type": "text", + "content": " iterations with a batch size of 16 by PSNR loss function with AdamW optimizer. In each training batch, each paired images and event voxel are randomly cropped to " + }, + { + "bbox": [ + 63, + 510, + 295, + 665 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 63, + 510, + 295, + 665 + ], + "type": "text", + "content": " and augmented by random flipping and rotation. The learning rate is initialized as " + }, + { + "bbox": [ + 63, + 510, + 295, + 665 + ], + "type": "inline_equation", + "content": "3 \\times 10^{-4}" + }, + { + "bbox": [ + 63, + 510, + 295, + 665 + ], + "type": "text", + "content": ", and a cosine annealing scheduler is used to drop the final learning rate as " + }, + { + "bbox": [ + 63, + 510, + 295, + 665 + ], + "type": "inline_equation", + "content": "10^{-7}" + }, + { + "bbox": [ + 63, + 510, + 295, + 665 + ], + "type": "text", + "content": ". They finetuned the models obtained from the first stage with a patch size of " + }, + { + "bbox": [ + 63, + 510, + 295, + 665 + ], + "type": "inline_equation", + "content": "512 \\times 512" + }, + { + "bbox": [ + 63, + 510, + 295, + 665 + ], + "type": "text", + "content": ". At this stage, all models are trained for another " + }, + { + "bbox": [ + 63, + 510, + 295, + 665 + ], + "type": "inline_equation", + "content": "2 \\times 10^{5}" + }, + { + "bbox": [ + 63, + 510, + 295, + 665 + ], + "type": "text", + "content": " iterations with a batch size of 4 and the learning rate drop from " + }, + { + "bbox": [ + 63, + 510, + 295, + 665 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-5}" + }, + { + "bbox": [ + 63, + 510, + 295, + 665 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 63, + 510, + 295, + 665 + ], + "type": "inline_equation", + "content": "10^{-6}" + }, + { + "bbox": [ + 63, + 510, + 295, + 665 + ], + "type": "text", + "content": ". Models are validated for every " + }, + { + "bbox": [ + 63, + 510, + 295, + 665 + ], + "type": "inline_equation", + "content": "10^{4}" + }, + { + "bbox": [ + 63, + 510, + 295, + 665 + ], + "type": "text", + "content": " iterations. Other settings remain unchanged." 
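Because the blurry input, the sharp target, and the event voxel must stay pixel-aligned, the random crop and flip/rotation above have to share one set of random draws across all three tensors. A minimal sketch of such paired augmentation, assuming (C, H, W) tensors; this is an illustration of the idea, not the team's code:

```python
import random
import torch

def paired_augment(blur, sharp, voxel, patch: int = 256):
    """Apply one random crop, flip, and 90-degree rotation identically
    to all spatially aligned tensors."""
    _, h, w = blur.shape
    top = random.randint(0, h - patch)
    left = random.randint(0, w - patch)
    outs = [x[:, top:top + patch, left:left + patch] for x in (blur, sharp, voxel)]
    if random.random() < 0.5:  # horizontal flip
        outs = [torch.flip(x, dims=[-1]) for x in outs]
    k = random.randint(0, 3)   # rotation by k * 90 degrees
    outs = [torch.rot90(x, k, dims=[-2, -1]) for x in outs]
    return tuple(outs)
```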
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 666, + 160, + 677 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 666, + 160, + 677 + ], + "spans": [ + { + "bbox": [ + 56, + 666, + 160, + 677 + ], + "type": "text", + "content": "- Validating and Testing:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 63, + 677, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 677, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 63, + 677, + 295, + 713 + ], + "type": "text", + "content": "They chose the highest validated models for each network during the fine-tuning stage and average two models' output as final result to improve robustness." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 72, + 388, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 388, + 83 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 388, + 83 + ], + "type": "text", + "content": "4.5. BUPTMM" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 90, + 396, + 101 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 90, + 396, + 101 + ], + "spans": [ + { + "bbox": [ + 314, + 90, + 396, + 101 + ], + "type": "text", + "content": "4.5.1. Architecture" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 106, + 554, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 106, + 554, + 166 + ], + "spans": [ + { + "bbox": [ + 313, + 106, + 554, + 166 + ], + "type": "text", + "content": "Our solution is built on EFNet[39] and STCNet[52]. Inspired by [50], they introduce a detail enhancement module that follows the EFNet prediction stage. The whole pipeline is illustrated in Fig. 4. The detail enhancement module adopts a simple U-Net structure." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 175, + 443, + 186 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 175, + 443, + 186 + ], + "spans": [ + { + "bbox": [ + 313, + 175, + 443, + 186 + ], + "type": "text", + "content": "4.5.2. Implementation Details" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 190, + 554, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 190, + 554, + 262 + ], + "spans": [ + { + "bbox": [ + 313, + 190, + 554, + 262 + ], + "type": "text", + "content": "Both EFNet and STCNet are initialized with pre-trained GoPro checkpoints. They fine-tune them separately using the NTIRE official training dataset without additional data, aside from the pre-trained GoPro weights. The patch size is set to " + }, + { + "bbox": [ + 313, + 190, + 554, + 262 + ], + "type": "inline_equation", + "content": "1024 \\times 1024" + }, + { + "bbox": [ + 313, + 190, + 554, + 262 + ], + "type": "text", + "content": ", and they employ the CosineAnnealingLR scheduler to adjust the learning rate." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 263, + 554, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 263, + 554, + 285 + ], + "spans": [ + { + "bbox": [ + 313, + 263, + 554, + 285 + ], + "type": "text", + "content": "The key differences in the training strategies for EFNet and STCNet are as follows:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 288, + 554, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 288, + 554, + 418 + ], + "spans": [ + { + "bbox": [ + 313, + 288, + 554, + 418 + ], + "type": "text", + "content": "For EFNet, they train EFNet for 100k iterations with a batch size of 4 using 4 NVIDIA H800 GPUs. The optimizer is AdamW with an initial learning rate of 2e-4. They generate the event voxel grid following the official script, setting the bin size to 24. Due to differences in the event encoder's channel size, they extended the pre-trained GoPro checkpoint weights from 6 to 24 bins. The loss function consists of the L1 loss, the Charbonnier loss, and the Sobel loss, with respective weights of 1.0, 0.5, and 0.5. Unlike the official EFNet implementation, they do not apply a mask between the two stages." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 420, + 554, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 420, + 554, + 479 + ], + "spans": [ + { + "bbox": [ + 313, + 420, + 554, + 479 + ], + "type": "text", + "content": "ForNet, they train STCNet for 1000 epochs with a batch size of 8 using 4 NVIDIA H800 GPUs. The optimizer is Adam with an initial learning rate of 2e-4. They use the official event voxel grid with a bin size of 6. The loss function is the Charbonnier loss." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 314, + 491, + 359, + 501 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 491, + 359, + 501 + ], + "spans": [ + { + "bbox": [ + 314, + 491, + 359, + 501 + ], + "type": "text", + "content": "4.6. WEI" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 508, + 554, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 508, + 554, + 593 + ], + "spans": [ + { + "bbox": [ + 313, + 508, + 554, + 593 + ], + "type": "text", + "content": "Since REFID [40] is an excellent method of event-based blurry video frame interpolation (VFI), considering the differences in modeling image deblurring and VFI problems, they adapt the REFID structure to fit the image deblurring challenge. As shown in Fig. 5, they develop a Bi-directional Gathered Recurrent Network (BGRN) for event-based image deblurring." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 601, + 436, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 601, + 436, + 612 + ], + "spans": [ + { + "bbox": [ + 313, + 601, + 436, + 612 + ], + "type": "text", + "content": "4.6.1. 
Network Architecture" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 616, + 554, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 616, + 554, + 689 + ], + "spans": [ + { + "bbox": [ + 313, + 616, + 554, + 689 + ], + "type": "text", + "content": "Following REFID [40], the events within the exposure time " + }, + { + "bbox": [ + 313, + 616, + 554, + 689 + ], + "type": "inline_equation", + "content": "(t - \\Delta t\\to t + \\Delta t)" + }, + { + "bbox": [ + 313, + 616, + 554, + 689 + ], + "type": "text", + "content": " are represented as a voxel grid " + }, + { + "bbox": [ + 313, + 616, + 554, + 689 + ], + "type": "inline_equation", + "content": "V_{t - \\Delta t\\rightarrow t + \\Delta t}\\in \\mathbb{R}^{(M + 1)\\times H\\times W}" + }, + { + "bbox": [ + 313, + 616, + 554, + 689 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 616, + 554, + 689 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 313, + 616, + 554, + 689 + ], + "type": "text", + "content": " is set to 9. Furthermore, they divide the voxel " + }, + { + "bbox": [ + 313, + 616, + 554, + 689 + ], + "type": "inline_equation", + "content": "V_{t - \\Delta t\\rightarrow t + \\Delta t}" + }, + { + "bbox": [ + 313, + 616, + 554, + 689 + ], + "type": "text", + "content": " into two segments " + }, + { + "bbox": [ + 313, + 616, + 554, + 689 + ], + "type": "inline_equation", + "content": "V_{t - \\Delta t\\rightarrow t}" + }, + { + "bbox": [ + 313, + 616, + 554, + 689 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 616, + 554, + 689 + ], + "type": "inline_equation", + "content": "V_{t + \\Delta t\\rightarrow t}" + }, + { + "bbox": [ + 313, + 616, + 554, + 689 + ], + "type": "text", + "content": " to perform forward and backward iterations, respectively." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 689, + 554, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 554, + 713 + ], + "type": "text", + "content": "The BGRN consists of image and event branches. Only a blurry image " + }, + { + "bbox": [ + 313, + 689, + 554, + 713 + ], + "type": "inline_equation", + "content": "B_{t}" + }, + { + "bbox": [ + 313, + 689, + 554, + 713 + ], + "type": "text", + "content": " is fed into the image branch, and the" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 67, + 68, + 545, + 305 + ], + "blocks": [ + { + "bbox": [ + 67, + 68, + 545, + 305 + ], + "lines": [ + { + "bbox": [ + 67, + 68, + 545, + 305 + ], + "spans": [ + { + "bbox": [ + 67, + 68, + 545, + 305 + ], + "type": "image", + "image_path": "9f8be6c650b99795b0cdb6d83e07801f8311e1e0282f1bf2309fd93b92bb1313.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 54, + 310, + 555, + 355 + ], + "lines": [ + { + "bbox": [ + 54, + 310, + 555, + 355 + ], + "spans": [ + { + "bbox": [ + 54, + 310, + 555, + 355 + ], + "type": "text", + "content": "Figure 5. The architecture of the Bi-directional Gathered Recurrent Network (BGRN), proposed by Team Wei, is designed for event-based image deblurring and serves as an enhanced reconfiguration network for REFID. [40]. 
\"EVR Block\": event recurrent block [40], \"EGACA\": event-guided adaptive channel attention [40], \"SConv\": stripped convolution, \"TConv\": transposed convolution, \"Bi-Fusion\": bidirectional fusion." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 375, + 296, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 375, + 296, + 616 + ], + "spans": [ + { + "bbox": [ + 56, + 375, + 296, + 616 + ], + "type": "text", + "content": "network output is the corresponding sharp image " + }, + { + "bbox": [ + 56, + 375, + 296, + 616 + ], + "type": "inline_equation", + "content": "\\hat{I}_t" + }, + { + "bbox": [ + 56, + 375, + 296, + 616 + ], + "type": "text", + "content": ". Besides, they split the original event branch into a forward recurrent branch and a backward recurrent branch, which respectively and recurrently consumes sub-voxels of forward event voxel " + }, + { + "bbox": [ + 56, + 375, + 296, + 616 + ], + "type": "inline_equation", + "content": "V_{t - \\Delta t\\to t}" + }, + { + "bbox": [ + 56, + 375, + 296, + 616 + ], + "type": "text", + "content": " and backward event voxel " + }, + { + "bbox": [ + 56, + 375, + 296, + 616 + ], + "type": "inline_equation", + "content": "V_{t + \\Delta t\\rightarrow t}" + }, + { + "bbox": [ + 56, + 375, + 296, + 616 + ], + "type": "text", + "content": " in a gathered way. In each recurrent iteration, the sub-voxel " + }, + { + "bbox": [ + 56, + 375, + 296, + 616 + ], + "type": "inline_equation", + "content": "V_{sub}\\in \\mathbb{R}^{2\\times H\\times W}" + }, + { + "bbox": [ + 56, + 375, + 296, + 616 + ], + "type": "text", + "content": " is fed to the event branch, which encodes the event information for the latent frame. To fuse the features obtained from forward and backward recurrent branching, the outputs of both directions are fed into a channel cascade and " + }, + { + "bbox": [ + 56, + 375, + 296, + 616 + ], + "type": "inline_equation", + "content": "1\\times 1" + }, + { + "bbox": [ + 56, + 375, + 296, + 616 + ], + "type": "text", + "content": " convolution at each scale (\"Bi-Fusion\" in Fig. 5). Then, they are added element by element with the features of the corresponding scale of the decoder. In addition, to reduce redundancy, they removed the recurrent structure of the decoder section and replaced it with residual blocks. Finally, to make the network learn high-frequency information, the output of the last residual block and the initial features of the blurred image are added element by element, and then the sharp image " + }, + { + "bbox": [ + 56, + 375, + 296, + 616 + ], + "type": "inline_equation", + "content": "\\hat{I}_t" + }, + { + "bbox": [ + 56, + 375, + 296, + 616 + ], + "type": "text", + "content": " is obtained through a " + }, + { + "bbox": [ + 56, + 375, + 296, + 616 + ], + "type": "inline_equation", + "content": "3\\times 3" + }, + { + "bbox": [ + 56, + 375, + 296, + 616 + ], + "type": "text", + "content": " convolution." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 625, + 183, + 639 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 625, + 183, + 639 + ], + "spans": [ + { + "bbox": [ + 55, + 625, + 183, + 639 + ], + "type": "text", + "content": "4.6.2. 
Implementation details" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 641, + 297, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 641, + 297, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 641, + 297, + 715 + ], + "type": "text", + "content": "Training strategy. They train BGRN with the HighREV training dataset specified by the organizer with a batch size of 4 for 200k iterations on an NVIDIA GeForce RTX 3090 GPU. They crop the input images and event voxels to " + }, + { + "bbox": [ + 55, + 641, + 297, + 715 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 641, + 297, + 715 + ], + "type": "text", + "content": " for training and use horizontal and vertical flips for data enhancement. AdamW [29] with an initial learning" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 376, + 555, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 376, + 555, + 555 + ], + "spans": [ + { + "bbox": [ + 313, + 376, + 555, + 555 + ], + "type": "text", + "content": "rate of " + }, + { + "bbox": [ + 313, + 376, + 555, + 555 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-4}" + }, + { + "bbox": [ + 313, + 376, + 555, + 555 + ], + "type": "text", + "content": " and a cosine learning rate annealing strategy with " + }, + { + "bbox": [ + 313, + 376, + 555, + 555 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-7}" + }, + { + "bbox": [ + 313, + 376, + 555, + 555 + ], + "type": "text", + "content": " as the minimum learning rate are adopted for optimization. They use a PSNR loss [39] as supervision. Ensemble strategy. During testing, they found that images prefixed with \"zigzag\" showed a large difference in brightness compared to other normal images. To adapt to this sudden change in brightness, they select images with the prefix \"sternwatz_window\" similar to this scene from the training set. Then, they double their brightness to fine-tune the pre-trained BGRN model for 5k iterations with an initial learning rate of " + }, + { + "bbox": [ + 313, + 376, + 555, + 555 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-5}" + }, + { + "bbox": [ + 313, + 376, + 555, + 555 + ], + "type": "text", + "content": ". Therefore, the ensemble strategy is applied when testing, i.e., the abnormally bright images (prefixed with \"zigzag\") are processed with the fine-tuned model, and the others are processed with the initial pretrained model." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 561, + 390, + 573 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 561, + 390, + 573 + ], + "spans": [ + { + "bbox": [ + 313, + 561, + 390, + 573 + ], + "type": "text", + "content": "4.7.DVS-WHU" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 578, + 436, + 590 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 578, + 436, + 590 + ], + "spans": [ + { + "bbox": [ + 313, + 578, + 436, + 590 + ], + "type": "text", + "content": "4.7.1. Network Architecture" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 594, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 594, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 594, + 556, + 715 + ], + "type": "text", + "content": "Positioned at Fig. 
6, the proposed Dual Channel Cross-modal Mamba (DCCM) architecture comprises three primary components: two Shallow Feature Extraction (SFE) modules, a series of " + }, + { + "bbox": [ + 313, + 594, + 556, + 715 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 594, + 556, + 715 + ], + "type": "text", + "content": " dual channel blocks (with " + }, + { + "bbox": [ + 313, + 594, + 556, + 715 + ], + "type": "inline_equation", + "content": "N = 20" + }, + { + "bbox": [ + 313, + 594, + 556, + 715 + ], + "type": "text", + "content": " in their experimental configuration), each containing two Residual Dense Blocks (RDB) [57] and two Cross Modal Mamba (CMM) [14] blocks, and a Global Feature Fusion (GFF) module. Initially, both blur image and events (represented in 24-bin voxel grids) are processed through the SFE module for preliminary feature extraction. Subsequently," + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 63, + 71, + 286, + 191 + ], + "blocks": [ + { + "bbox": [ + 63, + 71, + 286, + 191 + ], + "lines": [ + { + "bbox": [ + 63, + 71, + 286, + 191 + ], + "spans": [ + { + "bbox": [ + 63, + 71, + 286, + 191 + ], + "type": "image", + "image_path": "c721fffa4b1c80802496768db4fde67eeb3302413c722054c2d6fddc7e8ce50f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 59, + 193, + 293, + 281 + ], + "blocks": [ + { + "bbox": [ + 59, + 193, + 293, + 281 + ], + "lines": [ + { + "bbox": [ + 59, + 193, + 293, + 281 + ], + "spans": [ + { + "bbox": [ + 59, + 193, + 293, + 281 + ], + "type": "image", + "image_path": "8c4442be526dca917ebfb53309df0e69db477c4ded87f55e59353dfeadb1e786.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 56, + 290, + 294, + 303 + ], + "lines": [ + { + "bbox": [ + 56, + 290, + 294, + 303 + ], + "spans": [ + { + "bbox": [ + 56, + 290, + 294, + 303 + ], + "type": "text", + "content": "Figure 6. Architecture of DCCM, proposed by Team DVS-WHU." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 322, + 295, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 322, + 295, + 357 + ], + "spans": [ + { + "bbox": [ + 55, + 322, + 295, + 357 + ], + "type": "text", + "content": "the dual channel blocks facilitate in-depth feature extraction and cross-modal interaction. Finally, the GFF module synthesizes the ultimate latent sharp image." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 54, + 357, + 295, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 357, + 295, + 490 + ], + "spans": [ + { + "bbox": [ + 54, + 357, + 295, + 490 + ], + "type": "text", + "content": "The core concept of their network is to establish a mutual compensatory relationship between the features derived from event data and those from blurred images through a dual-channel framework. Specifically, while event data are often characterized by significant noise, images typically exhibit lower noise levels. The CMM block is employed to incorporate image features into the event data, thereby mitigating the noise present in the events. 
Conversely, event data are rich in sharp edge information, and the CMM block also facilitates the integration of event features into blurred images, ultimately contributing to the deblurred result." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 495, + 185, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 495, + 185, + 506 + ], + "spans": [ + { + "bbox": [ + 55, + 495, + 185, + 506 + ], + "type": "text", + "content": "4.7.2. Implementation Details" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 510, + 295, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 510, + 295, + 654 + ], + "spans": [ + { + "bbox": [ + 55, + 510, + 295, + 654 + ], + "type": "text", + "content": "The network is implemented in PyTorch and trained on two NVIDIA GeForce RTX 3090 GPUs for 150 epochs with a ground-truth-guided L1 loss. The training process is composed of two phases. During the first phase, they follow the strategy of Cheng et al. [6] and pretrain their DCCM on a mixed dataset including the synthetic REDS dataset [35] and the semi-synthetic HQF dataset [38] with a learning rate fixed at " + }, + { + "bbox": [ + 55, + 510, + 295, + 654 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 55, + 510, + 295, + 654 + ], + "type": "text", + "content": " for 50 epochs. In the second phase, the network is fine-tuned on the HighREV dataset [40], where the images are randomly cropped into " + }, + { + "bbox": [ + 55, + 510, + 295, + 654 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 55, + 510, + 295, + 654 + ], + "type": "text", + "content": " patches with horizontal flipping for data augmentation and the learning rate linearly decays to " + }, + { + "bbox": [ + 55, + 510, + 295, + 654 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-5}" + }, + { + "bbox": [ + 55, + 510, + 295, + 654 + ], + "type": "text", + "content": " until the 150th epoch." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 660, + 134, + 672 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 660, + 134, + 672 + ], + "spans": [ + { + "bbox": [ + 55, + 660, + 134, + 672 + ], + "type": "text", + "content": "4.8. PixelRevive" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 677, + 295, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 677, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 677, + 295, + 715 + ], + "type": "text", + "content": "The model they used was the same as EFNet [39]. The key to the improved performance of their model lay in the utilization of additional datasets during training and" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 313, + 72, + 553, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 262 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 262 + ], + "type": "text", + "content": "the adoption of larger image sizes in the final fine-tuning phase. They employed a two-stage training strategy. First, they used an event simulator, V2E [15], to generate events from the REDS dataset. To generate the dataset, they set the timestamp resolution to 0.001 and the DVS exposure duration to 0.001. The remaining parameters were configured identically to those specified in the V2E paper. They obtained over 20,000 triplets of events, blurred images, and sharp images. They trained the model on REDS for 250,000 iterations, with gt_size 256 and patch size 8. 
When training on the simulated dataset and validating on the HighREV validation set, they observed a paradoxical divergence: while the training PSNR consistently improved, the validation PSNR exhibited a decline. This counterintuitive phenomenon may stem from distributional discrepancies between synthetic data and HighREV characteristics across multiple feature dimensions." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 263, + 555, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 263, + 555, + 384 + ], + "spans": [ + { + "bbox": [ + 313, + 263, + 555, + 384 + ], + "type": "text", + "content": "Then, they fine-tuned it on the HighREV training dataset for 200,000 iterations, with gt_size 512 and patch size 8. The True-CosineAnnealingLR scheduler was employed in both training phases, configured with a period matching the total training iterations and a minimum learning rate value of 1e-7. After experiments, they found that a larger gt_size can improve the PSNR by about 0.5 dB. Experiments showed performance decreases when gt_size exceeds 512 (tested range: 256-608), making 512 the optimal size. All other strategies are the same as in EFNet." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 389, + 362, + 401 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 389, + 362, + 401 + ], + "spans": [ + { + "bbox": [ + 313, + 389, + 362, + 401 + ], + "type": "text", + "content": "4.9. CHD" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 407, + 554, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 407, + 554, + 479 + ], + "spans": [ + { + "bbox": [ + 313, + 407, + 554, + 479 + ], + "type": "text", + "content": "As illustrated in Fig. 7, team CHD develops an efficient Event-Image Deblurformer Network (EIDFNet) based on the Restormer architecture [55]. To address the computational bottleneck encountered when restoring high-resolution blurry images using event data, they incorporate key design elements from EFNet [39]." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 483, + 437, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 483, + 437, + 495 + ], + "spans": [ + { + "bbox": [ + 313, + 483, + 437, + 495 + ], + "type": "text", + "content": "4.9.1. Network Architecture" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 498, + 555, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 498, + 555, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 498, + 555, + 714 + ], + "type": "text", + "content": "Considering the speed of model training, they still used the official 6-channel voxel grid event representation to achieve a balance between efficiency and precision. They input the blurred image and the event representation, at consistent spatial resolution, into the network and employ a modified Transformer Block to fuse cross-modal features. Firstly, they modify the transformer block in Restormer [55] into a fusion module that achieves full interaction between different feature channels by setting the number of input and output dims in the GDFN and adding " + }, + { + "bbox": [ + 313, + 498, + 555, + 714 + ], + "type": "inline_equation", + "content": "1 \\times 1" + }, + { + "bbox": [ + 313, + 498, + 555, + 714 + ], + "type": "text", + "content": " convolutions in the residual connections. Additionally, they build a mutually enhanced fusion encoder based on the Event-Image CrossModal Attention Fusion Module (EICA) proposed in EFNet [39]. 
The enhanced image features are obtained using K and V derived from event embeddings, while Q is sourced from image embeddings. Conversely, the enhanced event features are generated with K and V originating from image embeddings, with Q being drawn from event embeddings." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 56, + 70, + 292, + 338 + ], + "blocks": [ + { + "bbox": [ + 56, + 70, + 292, + 338 + ], + "lines": [ + { + "bbox": [ + 56, + 70, + 292, + 338 + ], + "spans": [ + { + "bbox": [ + 56, + 70, + 292, + 338 + ], + "type": "image", + "image_path": "45b31251263c662d830970b24c8d4673eaabe0b4c9988af96695c9a7b73ede74.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 346, + 295, + 369 + ], + "lines": [ + { + "bbox": [ + 55, + 346, + 295, + 369 + ], + "spans": [ + { + "bbox": [ + 55, + 346, + 295, + 369 + ], + "type": "text", + "content": "Figure 7. The framework of Event-Image Deblurformer Network (EIDFNet), proposed by Team CHD." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 54, + 403, + 295, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 403, + 295, + 535 + ], + "spans": [ + { + "bbox": [ + 54, + 403, + 295, + 535 + ], + "type": "text", + "content": "In order to achieve comprehensive integration of event and image features, the enhanced image features and enhanced event features are concatenated along the channel dimension. Subsequently, these concatenated features are fused using a Modified Transformer Block. Ultimately, each encoder produces enhanced image features, enhanced event features, and fused features. The enhanced event and image features undergo downsampling before being input into the subsequent encoder. The fusion feature is directly linked to the corresponding decoding feature through a skip connection." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 560, + 160, + 572 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 560, + 160, + 572 + ], + "spans": [ + { + "bbox": [ + 55, + 560, + 160, + 572 + ], + "type": "text", + "content": "4.9.2. Training Strategy" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 54, + 582, + 295, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 582, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 54, + 582, + 295, + 715 + ], + "type": "text", + "content": "They adopt a progressive learning strategy following the settings in Restormer [55] and train the model on an A100 GPU with an L1 loss. The network is trained on smaller image patches in the early epochs and on gradually larger patches in the later training epochs. During the training process, the batch sizes are [4,3,2,2,1,1] and the patch sizes are [128,160,192,256,320,384], with iteration counts of [92000,64000,48000,36000,36000,24000]. They employ the AdamW optimizer with an initial learning rate of 3e-4 that follows a CosineAnnealingRestartCyclicLR decay strategy." 
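The progressive learning schedule just described pairs shrinking batch sizes with growing patch sizes at fixed iteration milestones; the bookkeeping can be sketched as below (dataloader rebuilding is elided; the numbers are taken directly from the text):

```python
# Progressive learning milestones from the text: (batch, patch, iterations).
STAGES = [(4, 128, 92_000), (3, 160, 64_000), (2, 192, 48_000),
          (2, 256, 36_000), (1, 320, 36_000), (1, 384, 24_000)]

def stage_for_iteration(it):
    # Map a global iteration index to the active (batch_size, patch_size).
    done = 0
    for batch, patch, n in STAGES:
        done += n
        if it < done:
            return batch, patch
    return STAGES[-1][:2]

assert stage_for_iteration(0) == (4, 128)
assert stage_for_iteration(100_000) == (3, 160)   # 92k <= it < 156k
assert stage_for_iteration(299_999) == (1, 384)   # schedule ends at 300k
```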
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 320, + 75, + 549, + 230 + ], + "blocks": [ + { + "bbox": [ + 320, + 75, + 549, + 230 + ], + "lines": [ + { + "bbox": [ + 320, + 75, + 549, + 230 + ], + "spans": [ + { + "bbox": [ + 320, + 75, + 549, + 230 + ], + "type": "image", + "image_path": "b9251cf46272ae3008bd4f011fa63a877d1c023a96ebea5494bbf7f13e3a452f.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 323, + 243, + 544, + 255 + ], + "lines": [ + { + "bbox": [ + 323, + 243, + 544, + 255 + ], + "spans": [ + { + "bbox": [ + 323, + 243, + 544, + 255 + ], + "type": "text", + "content": "Figure 8. Overview of the proposed pipeline by Team SMU." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 314, + 275, + 367, + 286 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 275, + 367, + 286 + ], + "spans": [ + { + "bbox": [ + 314, + 275, + 367, + 286 + ], + "type": "text", + "content": "4.10. SMU" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 293, + 394, + 304 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 293, + 394, + 304 + ], + "spans": [ + { + "bbox": [ + 314, + 293, + 394, + 304 + ], + "type": "text", + "content": "4.10.1. Motivation" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 308, + 553, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 308, + 553, + 463 + ], + "spans": [ + { + "bbox": [ + 313, + 308, + 553, + 463 + ], + "type": "text", + "content": "Inspired by recent successes in cross-knowledge sharing between events and RGB frames [39], hierarchical temporal and frequency modelling [18, 40] and stage-wise fine-fusion [20] for the task of event-based RGB deblurring, they propose to modify the base EFNet model [39] such that the modified model serves as a unified framework which (1) iteratively fine-tunes the coarser deblurred images through two stages of extensive fine-fusion to combat the insufficiencies of the existing decoding techniques while (2) can optionally be made to be specifically aware of propagated frequency information in latent representations to locally and globally filter the blur features in the RGB images through leveraging event features in the frequency domain." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 464, + 554, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 464, + 554, + 619 + ], + "spans": [ + { + "bbox": [ + 313, + 464, + 554, + 619 + ], + "type": "text", + "content": "In addition, to the best knowledge, none of the existing methods for event-based RGB deblurring recognizes the importance of feature tracking in this task which can be beneficial especially in challenging conditions such as high contrast (i.e. very bright or dark surroundings) and fast motion (i.e., large pixel displacements within an accumulated event volume) scenarios [33] towards robust performance. To address this limitation, they explicitly employ a data-driven feature tracking module in the pipeline, an inline feature tracker block, such that event feature tracks corresponding to different points in the reference RGB frame are intuitively incorporated in the learning process specifically in the initial stages of the unified framework." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 627, + 441, + 637 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 627, + 441, + 637 + ], + "spans": [ + { + "bbox": [ + 313, + 627, + 441, + 637 + ], + "type": "text", + "content": "4.10.2. Network Architecture" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 642, + 554, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 642, + 554, + 712 + ], + "spans": [ + { + "bbox": [ + 313, + 642, + 554, + 712 + ], + "type": "text", + "content": "As depicted in Fig. 8, they propose three main modifications: the inline feature tracker module, bidirectional frame fusion and AdaRevD refinement, to the original EFNet, backed by the motivation as described in section 4.10.1 and validated through the experiments. To this end, they design the inline feature tracker such that the latent RGB and event" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 191 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 191 + ], + "type": "text", + "content": "features are merged and learned through a flow autoencoder block in combination with a Conv-LSTM block to retrieve the temporal alignment of features. Furthermore, it is to be noted that they place the tracker at an initial stage of the pipeline to ensure that the tracker has the access to the high-level features of each modality, rather than the deeper low-level features, since high-level features, which are close to the input data, are more promising to contain information on temporal propagation, which is critical for co-aligned feature tracking." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 192, + 295, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 192, + 295, + 371 + ], + "spans": [ + { + "bbox": [ + 55, + 192, + 295, + 371 + ], + "type": "text", + "content": "Inspired by [20], they design the first stage of refinement using a bidirectional frame fusion block, specifically targeting the spatiotemporal information flow between adjacent coarse frames while in the second stage of refinement, they further refine the output from the first refinement stage with an objective to identify the still remaining degradation patterns in the RGB space and tackle them using an adaptive patch exiting reversible decoder module [31]. Optionally, to implement the frequency-based filtering of blur features, they follow the cross-modal frequency (CMF) module proposed by [18] such that latent representations at each level of the first U-Net are passed through CMF modules, and concatenated in the decoder levels, in a hierarchical fashion to enhance the latent feature representations with frequency-aware characteristics." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 376, + 189, + 387 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 376, + 189, + 387 + ], + "spans": [ + { + "bbox": [ + 55, + 376, + 189, + 387 + ], + "type": "text", + "content": "4.10.3. 
Implementation Details" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 390, + 294, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 390, + 294, + 462 + ], + "spans": [ + { + "bbox": [ + 55, + 390, + 294, + 462 + ], + "type": "text", + "content": "They train the models using one NVIDIA 3090 GPU machine in two stages: (1) primary event-RGB fusion pipeline including the proposed frequency-aware module, explicit feature tracking and the first iteration of refinement based on the bidirectional frame fusion block and (2) second iteration of refinement based on AdaRevD framework [31]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 463, + 294, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 463, + 294, + 522 + ], + "spans": [ + { + "bbox": [ + 55, + 463, + 294, + 522 + ], + "type": "text", + "content": "By following the baseline implementation [39], they train the models on the HighREV dataset, in both stages, with an initial learning rate of " + }, + { + "bbox": [ + 55, + 463, + 294, + 522 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-4}" + }, + { + "bbox": [ + 55, + 463, + 294, + 522 + ], + "type": "text", + "content": " for a total of " + }, + { + "bbox": [ + 55, + 463, + 294, + 522 + ], + "type": "inline_equation", + "content": "2 \\times 10^{4}" + }, + { + "bbox": [ + 55, + 463, + 294, + 522 + ], + "type": "text", + "content": " iterations. The utilized optimizer is AdamW [29] and the learning objective is set to be PSNR loss [39]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 528, + 121, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 528, + 121, + 540 + ], + "spans": [ + { + "bbox": [ + 55, + 528, + 121, + 540 + ], + "type": "text", + "content": "4.11.JNU620" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 545, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 545, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 545, + 295, + 713 + ], + "type": "text", + "content": "As shown in Fig. 9, their framework adopts EFNet [39] as the baseline architecture. To enchance frequency-aware feature processing, a selection frequency block (SF Block) [9] is integrated following each decoder. The architecture introduces two key components: 1) A multi-branch dynamic selection frequency (MDSF) module that adaptively decouples feature mappings into distinct frequency components through dynamic convolution operations; 2) A multi-branch compact selection frequency (MCSF) module specifically designed to expand the receptive field for processing degraded blurry images. Multiple data augmentation strategies were employed, including horizontal and vertical shiftings. For data preparation, they implemented multiple augmentation strategies including horizontal and vertical spa" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "type": "text", + "content": "tial shifts. The model was trained for 120,000 iterations on an NVIDIA GeForce RTX 3090 GPU with a batch size of 4. 
The models were optimized by the Adam method with " + }, + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "type": "inline_equation", + "content": "\\beta_{1} = 0.9" + }, + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "type": "inline_equation", + "content": "\\beta_{2} = 0.99" + }, + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "type": "text", + "content": ", and the weight decay was set to " + }, + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "type": "inline_equation", + "content": "10^{-4}" + }, + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "type": "text", + "content": ". The initial learning rate was set to " + }, + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "type": "inline_equation", + "content": "2 \\times 10^{-4}" + }, + { + "bbox": [ + 313, + 72, + 553, + 192 + ], + "type": "text", + "content": " and gradually decreased following a cosine annealing schedule. In the inference phase, each test image is augmented with horizontal and vertical flips before being input into the model. The final restored image is generated by averaging all augmented outputs." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 201, + 368, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 201, + 368, + 213 + ], + "spans": [ + { + "bbox": [ + 313, + 201, + 368, + 213 + ], + "type": "text", + "content": "4.12. colab" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 220, + 553, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 220, + 553, + 388 + ], + "spans": [ + { + "bbox": [ + 313, + 220, + 553, + 388 + ], + "type": "text", + "content": "Our team proposes an improved method based on EFNet, named DEFNet (Dynamic Enhanced Fusion Network). This method incorporates three key enhancements. First, we introduce a multi-scale dynamic fusion module, which fuses event and image features at multiple spatial resolutions, significantly improving the restoration of fine details in blurred areas [17]. Second, we enhance the original EICA module by integrating a bidirectional attention mechanism, enabling more effective mutual guidance and interaction between image and event features. Third, for processing event data, we adopt a weighted interpolation strategy [40] that models the dynamic weighting of event sequences more accurately, thereby enriching the temporal details provided to the image restoration process." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 396, + 384, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 396, + 384, + 407 + ], + "spans": [ + { + "bbox": [ + 313, + 396, + 384, + 407 + ], + "type": "text", + "content": "4.12.1. Network" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 412, + 553, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 412, + 553, + 496 + ], + "spans": [ + { + "bbox": [ + 313, + 412, + 553, + 496 + ], + "type": "text", + "content": "Fig. 10 presents the architecture of DEFNet, which is built upon EFNet and incorporates the newly introduced modules: the multi-scale dynamic fusion module and the enhanced EICA module with a bidirectional attention mechanism. These components work collaboratively to optimize the motion deblurring process by improving feature representation and fusion between the image and event data." 
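The flip-based test-time augmentation described for Team JNU620 above can be sketched as follows; flipping the event voxel together with the image is an assumption that keeps the two modalities spatially aligned, and the exact set of flips is not spelled out in the text:

```python
import torch

@torch.no_grad()
def flip_tta(model, blur, voxel):
    # Average predictions over the identity, a horizontal flip, and a
    # vertical flip; each output is un-flipped before averaging.
    preds = []
    for dims in ([], [-1], [-2]):                # none, width, height
        b = torch.flip(blur, dims) if dims else blur
        v = torch.flip(voxel, dims) if dims else voxel
        out = model(b, v)
        preds.append(torch.flip(out, dims) if dims else out)
    return torch.stack(preds).mean(dim=0)
```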
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 498, + 554, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 498, + 554, + 665 + ], + "spans": [ + { + "bbox": [ + 313, + 498, + 554, + 665 + ], + "type": "text", + "content": "During the deblurring process, event streams are used to provide fine-grained temporal variation information that guides the restoration of motion blur in image frames. Specifically, the Symmetric Cumulative Event Representation (SCER) encodes the temporal distribution of events while the enhanced Event-Image Cross-modal Attention Fusion (EICA) module leverages bidirectional attention to facilitate deeper interaction between modalities. Additionally, the integration of weighted interpolation improves the temporal alignment and accuracy of event feature extraction. Together, these components enable DEFNet to more effectively restore motion-blurred images by enhancing edge sharpness, preserving texture, and capturing motion dynamics with higher fidelity." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 673, + 448, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 673, + 448, + 685 + ], + "spans": [ + { + "bbox": [ + 313, + 673, + 448, + 685 + ], + "type": "text", + "content": "4.12.2. Implementation Details" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 689, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 553, + 713 + ], + "type": "text", + "content": "We use the AdamW optimizer with an initial learning rate of 2e-4, weight decay of 1e-4, and betas set to [0.9, 0.99]." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 74, + 500, + 242 + ], + "blocks": [ + { + "bbox": [ + 109, + 74, + 500, + 242 + ], + "lines": [ + { + "bbox": [ + 109, + 74, + 500, + 242 + ], + "spans": [ + { + "bbox": [ + 109, + 74, + 500, + 242 + ], + "type": "image", + "image_path": "631527531248f11e51e7497f1fac20dcc126570e11b65b294387f6222f8be17d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 194, + 252, + 416, + 264 + ], + "lines": [ + { + "bbox": [ + 194, + 252, + 416, + 264 + ], + "spans": [ + { + "bbox": [ + 194, + 252, + 416, + 264 + ], + "type": "text", + "content": "Figure 9. The model framework proposed by Team JNU620." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 95, + 283, + 254, + 534 + ], + "blocks": [ + { + "bbox": [ + 95, + 283, + 254, + 534 + ], + "lines": [ + { + "bbox": [ + 95, + 283, + 254, + 534 + ], + "spans": [ + { + "bbox": [ + 95, + 283, + 254, + 534 + ], + "type": "image", + "image_path": "94f49739a4b00f51de806981019b97f06eafd67affb2ff59cff77e96de1c28d9.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 69, + 546, + 281, + 558 + ], + "lines": [ + { + "bbox": [ + 69, + 546, + 281, + 558 + ], + "spans": [ + { + "bbox": [ + 69, + 546, + 281, + 558 + ], + "type": "text", + "content": "Figure 10. DEFNet architecture, proposed by Team colab." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 582, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 582, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 582, + 296, + 713 + ], + "type": "text", + "content": "To dynamically adjust the learning rate, we used the True-CosineAnnealingLR scheduler with a maximum iteration count of T_max = 200000 and a minimum learning rate of 1e-7. During training, the batch size was set to 4, and 3 worker threads were used per GPU. The total number of training iterations was set to 40000. This method was trained and validated on the HighREV dataset. The model achieved significant improvements on both the training and validation sets, with PSNR and SSIM used as evaluation metrics during training. Validation was performed every 10,000 iterations, and the model was regularly saved." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 313, + 285, + 373, + 296 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 285, + 373, + 296 + ], + "spans": [ + { + "bbox": [ + 313, + 285, + 373, + 296 + ], + "type": "text", + "content": "4.13.CMSL" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 313, + 302, + 554, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 302, + 554, + 350 + ], + "spans": [ + { + "bbox": [ + 313, + 302, + 554, + 350 + ], + "type": "text", + "content": "The Cascade Event Deblurring Model With Event Edge Loss was built based on EFNet [39]. An motion edge loss and a cascade framework were introduced to enhance the performance of EFNet." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 350, + 555, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 350, + 555, + 518 + ], + "spans": [ + { + "bbox": [ + 313, + 350, + 555, + 518 + ], + "type": "text", + "content": "The EFNet backbone was adopted and two improvements were proposed. Firstly, the event data were organized and represented as voxel [39]. Then, two frame of the event voxels that were most close to the center of the exposure time were multiplied to produce a motion edge frame. The motion edge frame contains the edge of the moving objects in the current frame as shown in fig. 11, fig. 12 is the corresponding edge of the ground truth image (sharp image). As shown in fig. 11 and fig. 12, the motion edge contains clear lines that were consistent with the true edges and could served as a guiding information for image deblurring. The edge of the deblured image output by the module should be similar to the motion edge. 
Therefore, a motion edge loss was proposed as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 374, + 530, + 494, + 544 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 374, + 530, + 494, + 544 + ], + "spans": [ + { + "bbox": [ + 374, + 530, + 494, + 544 + ], + "type": "interline_equation", + "content": "\\ell_{edge} = \\operatorname{mse}(\\operatorname{edge}(\\widehat{x}) \\cdot m, e)", + "image_path": "047d45c9675c3b93c5f749c35e8ec0364ffc2a4e8ec6d19a9d71dffe864ced92.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 365, + 551, + 504, + 563 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 551, + 504, + 563 + ], + "spans": [ + { + "bbox": [ + 365, + 551, + 504, + 563 + ], + "type": "interline_equation", + "content": "m_{i,j} = 1 \\ \\text{if} \\ e_{i,j} > \\tau, \\ \\text{else} \\ 0", + "image_path": "ddc48fce0e5764c0d89a584e63e9518afb9950de5745b935d97fd99b64708c59.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 570, + 554, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 570, + 554, + 617 + ], + "spans": [ + { + "bbox": [ + 313, + 570, + 554, + 617 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 570, + 554, + 617 + ], + "type": "inline_equation", + "content": "\\mathrm{mse}(A, B)" + }, + { + "bbox": [ + 313, + 570, + 554, + 617 + ], + "type": "text", + "content": " is the element-wise mean squared error between matrices A and B, " + }, + { + "bbox": [ + 313, + 570, + 554, + 617 + ], + "type": "inline_equation", + "content": "\\widehat{x}" + }, + { + "bbox": [ + 313, + 570, + 554, + 617 + ], + "type": "text", + "content": " is the output deblurred image, e is the motion edge frame, m is the motion edge mask, and " + }, + { + "bbox": [ + 313, + 570, + 554, + 617 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 313, + 570, + 554, + 617 + ], + "type": "text", + "content": " is the threshold parameter." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 618, + 555, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 618, + 555, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 618, + 555, + 714 + ], + "type": "text", + "content": "Secondly, a cascade framework was proposed in which two EFNets were connected in cascade to further enhance the image deblurring ability. The first EFNet took the four frames of the event voxel that were relatively far from the center of the exposure time, while the second EFNet took the two frames that were relatively close to the center of the exposure time. The two EFNets form a coarse-to-fine paradigm that gradually removes the motion blur." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 61, + 74, + 291, + 248 + ], + "blocks": [ + { + "bbox": [ + 61, + 74, + 291, + 248 + ], + "lines": [ + { + "bbox": [ + 61, + 74, + 291, + 248 + ], + "spans": [ + { + "bbox": [ + 61, + 74, + 291, + 248 + ], + "type": "image", + "image_path": "90e21bbf3f89174ac0e0fa957469dd6db0f60d186023d4e39a2ba972e14a96e5.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 85, + 260, + 265, + 272 + ], + "lines": [ + { + "bbox": [ + 85, + 260, + 265, + 272 + ], + "spans": [ + { + "bbox": [ + 85, + 260, + 265, + 272 + ], + "type": "text", + "content": "Figure 11. 
The visualization of the motion edges." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 61, + 290, + 291, + 463 + ], + "blocks": [ + { + "bbox": [ + 61, + 290, + 291, + 463 + ], + "lines": [ + { + "bbox": [ + 61, + 290, + 291, + 463 + ], + "spans": [ + { + "bbox": [ + 61, + 290, + 291, + 463 + ], + "type": "image", + "image_path": "0cfd0d4be9e6d4f6281d40f20feff55f33ef42523e6946f552f2758c4155dabf.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 89, + 475, + 261, + 487 + ], + "lines": [ + { + "bbox": [ + 89, + 475, + 261, + 487 + ], + "spans": [ + { + "bbox": [ + 89, + 475, + 261, + 487 + ], + "type": "text", + "content": "Figure 12. The edges in the ground truth frame" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 510, + 115, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 510, + 115, + 521 + ], + "spans": [ + { + "bbox": [ + 55, + 510, + 115, + 521 + ], + "type": "text", + "content": "4.14. KUnet" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 529, + 143, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 529, + 143, + 540 + ], + "spans": [ + { + "bbox": [ + 55, + 529, + 143, + 540 + ], + "type": "text", + "content": "4.14.1. Architecture" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 544, + 295, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 544, + 295, + 628 + ], + "spans": [ + { + "bbox": [ + 55, + 544, + 295, + 628 + ], + "type": "text", + "content": "Their solution is built upon a custom KUnet backbone tailored for event-based image deblurring. The model employs a dual-encoder strategy that separately processes RGB images and voxelized event data, each through a dedicated encoder branch. At the bottleneck, the features are fused via channel-wise concatenation and passed through a transformer module." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 630, + 295, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 630, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 630, + 295, + 715 + ], + "type": "text", + "content": "A key novelty in the design is the use of KANLinear layers within the transformer block. These layers, based on spline-interpolated kernels, improve attention expressiveness without adding significant computational overhead. This fusion architecture leverages the temporal sharpness of events with the spatial-semantic richness of RGB images to produce high-fidelity deblurred outputs." + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 326, + 70, + 433, + 152 + ], + "blocks": [ + { + "bbox": [ + 326, + 70, + 433, + 152 + ], + "lines": [ + { + "bbox": [ + 326, + 70, + 433, + 152 + ], + "spans": [ + { + "bbox": [ + 326, + 70, + 433, + 152 + ], + "type": "image", + "image_path": "e84d3aa1ada2855602975b3faa64b193f7dfa02538dc62e0653e4b2bc050f2da.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 159, + 554, + 182 + ], + "lines": [ + { + "bbox": [ + 313, + 159, + 554, + 182 + ], + "spans": [ + { + "bbox": [ + 313, + 159, + 554, + 182 + ], + "type": "text", + "content": "Figure 13. Left: Input blurry frame. Right: output of KUnet, with detailed texture." 
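Looking back at the motion edge loss of Sec. 4.13, the two equations translate almost directly into code. Below is a minimal PyTorch sketch under explicit assumptions: edge() is approximated here by a gradient-magnitude operator (the text does not define the edge operator), e is the product of the two voxel frames closest to the exposure center, and tau = 0.1 is a hypothetical threshold.

```python
import torch
import torch.nn.functional as F

def edge(img):
    # Simple gradient-magnitude edge map (one possible choice of edge()).
    gx = img[..., :, 1:] - img[..., :, :-1]
    gy = img[..., 1:, :] - img[..., :-1, :]
    return F.pad(gx.abs(), (0, 1)) + F.pad(gy.abs(), (0, 0, 0, 1))

def motion_edge_loss(x_hat, voxel, tau=0.1):
    # e: motion edge frame from the two voxel frames closest to the center
    # of the exposure time; m: binary mask where e exceeds the threshold.
    mid = voxel.shape[1] // 2
    e = voxel[:, mid - 1] * voxel[:, mid]
    m = (e > tau).float()
    x_edge = edge(x_hat).mean(dim=1)   # collapse RGB channels to one map
    return F.mse_loss(x_edge * m, e)

# loss = motion_edge_loss(deblurred, event_voxel)  # shapes: (B,3,H,W), (B,6,H,W)
```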
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 435, + 71, + 542, + 152 + ], + "blocks": [ + { + "bbox": [ + 435, + 71, + 542, + 152 + ], + "lines": [ + { + "bbox": [ + 435, + 71, + 542, + 152 + ], + "spans": [ + { + "bbox": [ + 435, + 71, + 542, + 152 + ], + "type": "image", + "image_path": "d8e8b79c88d3c5152418bdc02984efc800208515ce23ccfbc32ffe188180b135.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 204, + 448, + 216 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 204, + 448, + 216 + ], + "spans": [ + { + "bbox": [ + 313, + 204, + 448, + 216 + ], + "type": "text", + "content": "4.14.2. Implementation Details" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 219, + 554, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 219, + 554, + 277 + ], + "spans": [ + { + "bbox": [ + 313, + 219, + 554, + 277 + ], + "type": "text", + "content": "They train the model from scratch on the official NTIRE 2025 HighREV dataset without any external data or pretrained weights. The voxelized events are represented using 6 temporal bins, generating a 6-channel input tensor for the event encoder." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 279, + 554, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 279, + 554, + 351 + ], + "spans": [ + { + "bbox": [ + 313, + 279, + 554, + 351 + ], + "type": "text", + "content": "Training was conducted using 2 NVIDIA A100 GPUs with a batch size of 8 and a patch size of " + }, + { + "bbox": [ + 313, + 279, + 554, + 351 + ], + "type": "inline_equation", + "content": "256 \\times 256" + }, + { + "bbox": [ + 313, + 279, + 554, + 351 + ], + "type": "text", + "content": ". They trained the network for 150k iterations using the AdamW optimizer (" + }, + { + "bbox": [ + 313, + 279, + 554, + 351 + ], + "type": "inline_equation", + "content": "\\beta_{1} = 0.9" + }, + { + "bbox": [ + 313, + 279, + 554, + 351 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 279, + 554, + 351 + ], + "type": "inline_equation", + "content": "\\beta_{2} = 0.99" + }, + { + "bbox": [ + 313, + 279, + 554, + 351 + ], + "type": "text", + "content": ", weight decay = 1e-4) and a CosineAnnealingLR scheduler. Data augmentations included random horizontal flips and rotations." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 351, + 553, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 351, + 553, + 387 + ], + "spans": [ + { + "bbox": [ + 313, + 351, + 553, + 387 + ], + "type": "text", + "content": "The loss function includes a PSNR loss weighted at 0.5. Their final checkpoint achieved a peak PSNR of 29.42 on the NTIRE 2025 validation phase." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 388, + 554, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 388, + 554, + 448 + ], + "spans": [ + { + "bbox": [ + 313, + 388, + 554, + 448 + ], + "type": "text", + "content": "Inference was performed using a sliding window approach with a max minibatch size of 8. 
They observed an inference time of " + }, + { + "bbox": [ + 313, + 388, + 554, + 448 + ], + "type": "inline_equation", + "content": "\\sim 0.15" + }, + { + "bbox": [ + 313, + 388, + 554, + 448 + ], + "type": "text", + "content": " seconds per frame on an A100 GPU, and a memory footprint of approximately 16 GB during training." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 326, + 448, + 411, + 459 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 448, + 411, + 459 + ], + "spans": [ + { + "bbox": [ + 326, + 448, + 411, + 459 + ], + "type": "text", + "content": "Model Complexity:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 460, + 485, + 506 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 315, + 460, + 397, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 460, + 397, + 470 + ], + "spans": [ + { + "bbox": [ + 315, + 460, + 397, + 470 + ], + "type": "text", + "content": "Parameters: 11M" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 315, + 472, + 416, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 472, + 416, + 483 + ], + "spans": [ + { + "bbox": [ + 315, + 472, + 416, + 483 + ], + "type": "text", + "content": "FLOPs: Not computed" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 315, + 483, + 485, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 483, + 485, + 495 + ], + "spans": [ + { + "bbox": [ + 315, + 483, + 485, + 495 + ], + "type": "text", + "content": "- GPU Memory Usage: 16 GB (training)" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 496, + 443, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 496, + 443, + 506 + ], + "spans": [ + { + "bbox": [ + 315, + 496, + 443, + 506 + ], + "type": "text", + "content": "Inference Time: 0.15s/frame" + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 326, + 508, + 419, + 518 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 508, + 419, + 518 + ], + "spans": [ + { + "bbox": [ + 326, + 508, + 419, + 518 + ], + "type": "text", + "content": "Code and Resources:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 315, + 520, + 554, + 593 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 315, + 520, + 554, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 520, + 554, + 544 + ], + "spans": [ + { + "bbox": [ + 315, + 520, + 554, + 544 + ], + "type": "text", + "content": "- GitHub: https://github.com/Splendor73/NTIRE2025_EventDeblur_challenge_asu" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 315, + 544, + 554, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 544, + 554, + 567 + ], + "spans": [ + { + "bbox": [ + 315, + 544, + 554, + 567 + ], + "type": "text", + "content": "- Pretrained: https://www.dropbox.com/scl/fi/19td2xtbzxed2bg8tc9w0/17_KUnet.zip" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 315, + 568, + 554, + 593 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 568, + 554, + 593 + ], + "spans": [ + { + "bbox": [ + 315, + 568, + 554, + 593 + ], + "type": "text", + "content": "- Results: https://www.dropbox.com/scl/fi/yrky29x2mdwt3k8e40yol/Results.zip" + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 313, + 599, + 385, + 613 + ], + "type": "title", + "angle": 0, + 
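A sliding-window pass with a capped minibatch, as used for inference here, might look like the following sketch. Only the maximum minibatch size of 8 comes from the report; the sliding_window_deblur helper, the 256-pixel tiles, the stride, and the uniform overlap averaging are illustrative assumptions.

```python
import torch

@torch.no_grad()
def sliding_window_deblur(model, x, patch=256, stride=224, max_batch=8):
    """Deblur a full-resolution tensor x of shape (1, C, H, W) by tiling it
    into overlapping patches, running the model on minibatches of at most
    `max_batch` patches, and averaging predictions in the overlaps.
    Assumes H, W >= patch."""
    _, _, h, w = x.shape
    rows = list(range(0, h - patch + 1, stride))
    cols = list(range(0, w - patch + 1, stride))
    if rows[-1] != h - patch: rows.append(h - patch)   # cover the bottom edge
    if cols[-1] != w - patch: cols.append(w - patch)   # cover the right edge
    tiles = [(r, c) for r in rows for c in cols]
    out = torch.zeros(1, 3, h, w)
    hits = torch.zeros(1, 1, h, w)
    for i in range(0, len(tiles), max_batch):
        chunk = tiles[i:i + max_batch]
        batch = torch.cat([x[:, :, r:r + patch, c:c + patch] for r, c in chunk])
        preds = model(batch)
        for pred, (r, c) in zip(preds, chunk):
            out[0, :, r:r + patch, c:c + patch] += pred
            hits[0, 0, r:r + patch, c:c + patch] += 1
    return out / hits   # uniform blending in the overlaps

model = torch.nn.Conv2d(3, 3, 3, padding=1)   # stand-in for the trained net
restored = sliding_window_deblur(model, torch.rand(1, 3, 1224, 1632))
print(restored.shape)  # torch.Size([1, 3, 1224, 1632])
```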
"lines": [ + { + "bbox": [ + 313, + 599, + 385, + 613 + ], + "spans": [ + { + "bbox": [ + 313, + 599, + 385, + 613 + ], + "type": "text", + "content": "4.15. Group10" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 313, + 617, + 554, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 617, + 554, + 677 + ], + "spans": [ + { + "bbox": [ + 313, + 617, + 554, + 677 + ], + "type": "text", + "content": "The solution is built upon a custom adaptation of the EFNet deblurring framework[39]. The method strategically harnesses both conventional image data and event-based information to mitigate motion blur effectively. Key components of the approach include:" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 313, + 677, + 554, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 677, + 554, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 677, + 554, + 714 + ], + "type": "text", + "content": "Dual-Stream Network Architecture: The model consists of parallel convolutional streams. One stream processes the blurry input image, while the other processes event data," + } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 120 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 120 + ], + "type": "text", + "content": "which is converted into a voxel grid representation. A cross-modal attention module subsequently fuses the features extracted from both modalities, enhancing the network's ability to recover fine details in dynamic scenes." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 121, + 294, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 121, + 294, + 193 + ], + "spans": [ + { + "bbox": [ + 55, + 121, + 294, + 193 + ], + "type": "text", + "content": "Event Data Representation: The raw event data - comprising spatial coordinates, timestamps, and polarity - is transformed into a voxel grid. This process involves temporal normalization and spatial mapping, enabling the network to capture the dynamic nature of motion events with high precision." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 194, + 294, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 194, + 294, + 277 + ], + "spans": [ + { + "bbox": [ + 55, + 194, + 294, + 277 + ], + "type": "text", + "content": "Training Strategy: Utilizing mixed precision training to maximize GPU efficiency and accelerate the convergence process. Gradient accumulation is employed to effectively simulate a larger batch size, which is critical for stable training on high-resolution data. The training loss is computed using the Mean Squared Error (MSE) criterion, guiding the network to produce high-quality deblurred images." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 277, + 294, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 277, + 294, + 348 + ], + "spans": [ + { + "bbox": [ + 55, + 277, + 294, + 348 + ], + "type": "text", + "content": "Data Pipeline: Custom PyTorch Dataset classes handle the loading and preprocessing of both image and event data. The pipeline includes resizing, normalization, and careful synchronization between blurry images and their corresponding event data, ensuring data consistency across modalities." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 350, + 294, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 350, + 294, + 410 + ], + "spans": [ + { + "bbox": [ + 55, + 350, + 294, + 410 + ], + "type": "text", + "content": "Performance Evaluation: The evaluation strategy employs widely accepted metrics such as PSNR and SSIM to quantify restoration quality. Test outputs are resized to their original dimensions and saved as lossless PNG images to preserve the fidelity of the results." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 411, + 176, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 411, + 176, + 422 + ], + "spans": [ + { + "bbox": [ + 67, + 411, + 176, + 422 + ], + "type": "text", + "content": "Additional details include:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 423, + 295, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 423, + 295, + 470 + ], + "spans": [ + { + "bbox": [ + 55, + 423, + 295, + 470 + ], + "type": "text", + "content": "Parameter Count: The EnhancedEFNet model consists of convolutional layers, CrossModalAttention blocks, and skip connections, leading to a parameter count in the range of millions." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 472, + 294, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 472, + 294, + 520 + ], + "spans": [ + { + "bbox": [ + 55, + 472, + 294, + 520 + ], + "type": "text", + "content": "CrossModalAttention layers: These layers introduce additional tensor operations and memory usage. No external pre-trained models were directly used in training. The architecture was trained from scratch on the provided dataset." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 521, + 294, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 521, + 294, + 556 + ], + "spans": [ + { + "bbox": [ + 55, + 521, + 294, + 556 + ], + "type": "text", + "content": "GPU Memory Usage: Memory usage is influenced by Batch Size, Default batch size of 4 per GPU, and Voxel Grid Representation, Uses 6 event bins, increasing input size." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 557, + 294, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 557, + 294, + 617 + ], + "spans": [ + { + "bbox": [ + 55, + 557, + 294, + 617 + ], + "type": "text", + "content": "CrossModalAttention: Inspired by self-attention mechanisms in Transformer models. Hybrid Loss Function: Combines MSE and L1 loss for better generalization.CosineAnnealingLR Scheduler: Used to dynamically adjust learning rates during training." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 618, + 294, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 618, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 618, + 294, + 713 + ], + "type": "text", + "content": "Use of Additional Training Data: Only NTIRE Dataset Used: The training was restricted to the HighREV dataset provided by NTIRE. No additional synthetic or external event-based datasets were incorporated. Potential Future Enhancements: Using real-world event datasets (e.g., DSEC, MVSEC) could improve generalization. Finetuning with pre-trained image restoration models (like DeblurGAN) could be explored." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 72, + 553, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 553, + 203 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 553, + 203 + ], + "type": "text", + "content": "Quantitative and Qualitative Improvements Quantitative Improvements (Metrics & Performance): Peak Signal-to-Noise Ratio (PSNR): Achieved PSNR: 25.93. Improved compared to baseline event fusion models. Structural Similarity Index (SSIM): Achieved SSIM: 0.82. Indicates better perceptual quality in restored images. Qualitative Improvements (Visual Results & Generalization): Better Detail Recovery: The attention-based fusion of events and images leads to sharper edges and better contrast in reconstructed images. Works well in low-light or high-motion blur scenarios." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 204, + 553, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 204, + 553, + 312 + ], + "spans": [ + { + "bbox": [ + 313, + 204, + 553, + 312 + ], + "type": "text", + "content": "Comparison with Baseline Models: Standard CNN-based deblurring struggles with fine-grained event details, but EnhancedEFNet effectively fuses event features to improve deblurring accuracy. CrossModalAttention aids in spatial alignment of events and images, reducing artifacts. Failure Cases & Future Improvements: Highly blurred images with saturated event data can still cause artifacts. More robust fusion mechanisms (e.g., transformer-based approaches) could further enhance performance." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 323, + 411, + 335 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 323, + 411, + 335 + ], + "spans": [ + { + "bbox": [ + 314, + 323, + 411, + 335 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 342, + 553, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 342, + 553, + 450 + ], + "spans": [ + { + "bbox": [ + 313, + 342, + 553, + 450 + ], + "type": "text", + "content": "This work was partially supported by the Humboldt Foundation, the Ministry of Education and Science of Bulgaria (support for INSAIT, part of the Bulgarian National Roadmap for Research Infrastructure). Shaolin Su was supported by the HORIZON MSCA Postdoctoral Fellowships funded by the European Union (project number 101152858). We thank the NTIRE 2025 sponsors: ByteDance, Meituan, Kuaishou, and University of Wurzburg (Computer Vision Lab)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 461, + 444, + 474 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 461, + 444, + 474 + ], + "spans": [ + { + "bbox": [ + 314, + 461, + 444, + 474 + ], + "type": "text", + "content": "A. 
Teams and affiliations" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 314, + 480, + 403, + 491 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 480, + 403, + 491 + ], + "spans": [ + { + "bbox": [ + 314, + 480, + 403, + 491 + ], + "type": "text", + "content": "NTIRE 2025 team" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 498, + 553, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 498, + 553, + 510 + ], + "spans": [ + { + "bbox": [ + 313, + 498, + 553, + 510 + ], + "type": "text", + "content": "Title: NTIRE 2025 Event-Based Image Deblurring" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 315, + 510, + 358, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 510, + 358, + 521 + ], + "spans": [ + { + "bbox": [ + 315, + 510, + 358, + 521 + ], + "type": "text", + "content": "Challenge" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 315, + 523, + 358, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 523, + 358, + 533 + ], + "spans": [ + { + "bbox": [ + 315, + 523, + 358, + 533 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 534, + 444, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 534, + 444, + 545 + ], + "spans": [ + { + "bbox": [ + 315, + 534, + 444, + 545 + ], + "type": "text", + "content": "Lei Sun1 (leo.sun@zju.edu.cn)," + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 315, + 546, + 502, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 546, + 502, + 557 + ], + "spans": [ + { + "bbox": [ + 315, + 546, + 502, + 557 + ], + "type": "text", + "content": "Andrea Alfarano1 (andrea.alfarano@insait.ai)," + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 315, + 558, + 477, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 558, + 477, + 569 + ], + "spans": [ + { + "bbox": [ + 315, + 558, + 477, + 569 + ], + "type": "text", + "content": "Peiqi Duan2 (duanqi0001@pku.edu.cn)," + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 315, + 570, + 459, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 570, + 459, + 582 + ], + "spans": [ + { + "bbox": [ + 315, + 570, + 459, + 582 + ], + "type": "text", + "content": "Shaolin " + }, + { + "bbox": [ + 315, + 570, + 459, + 582 + ], + "type": "inline_equation", + "content": "\\mathrm{Su}^3" + }, + { + "bbox": [ + 315, + 570, + 459, + 582 + ], + "type": "text", + "content": " (shaolin@cvc.uab.cat)," + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 315, + 582, + 485, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 582, + 485, + 594 + ], + "spans": [ + { + "bbox": [ + 315, + 582, + 485, + 594 + ], + "type": "text", + "content": "Kaiwei Wang4 (wangkaiwei@zju.edu.cn)," + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 315, + 594, + 460, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 594, + 460, + 605 + ], + "spans": [ + { + "bbox": [ + 315, + 594, + 460, + 605 + ], + "type": "text", + "content": "Boxin Shi" + }, + { + "bbox": [ + 315, + 594, + 460, + 605 + ], + "type": "inline_equation", + "content": "^2" + }, + { + "bbox": [ + 315, + 594, + 460, + 605 + ], + "type": "text", + "content": " (shiboxin@pku.edu.cn)," + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 315, + 606, + 514, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 315, + 606, + 514, + 617 + ], + "spans": [ + { + "bbox": [ + 315, + 606, + 514, + 617 + ], + "type": "text", + "content": "Radu Timofte" + }, + { + "bbox": [ + 315, + 606, + 514, + 617 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 315, + 606, + 514, + 617 + ], + "type": "text", + "content": " (radu.timofte@uni-wuerzburg.de)" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 315, + 618, + 502, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 618, + 502, + 629 + ], + "spans": [ + { + "bbox": [ + 315, + 618, + 502, + 629 + ], + "type": "text", + "content": "Danda Pani Paudel1 (danda.paudel@insait.ai)," + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 315, + 630, + 496, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 630, + 496, + 641 + ], + "spans": [ + { + "bbox": [ + 315, + 630, + 496, + 641 + ], + "type": "text", + "content": "Luc Van Gool1 (vangool@vision.ee.ethz.ch)," + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 314, + 654, + 365, + 664 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 654, + 365, + 664 + ], + "spans": [ + { + "bbox": [ + 314, + 654, + 365, + 664 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 315, + 666, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 33, + "blocks": [ + { + "bbox": [ + 315, + 666, + 553, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 666, + 553, + 689 + ], + "spans": [ + { + "bbox": [ + 315, + 666, + 553, + 689 + ], + "type": "text", + "content": "1 INSAIT, Sofia University \"St. Kliment Ohridski\", Bulgaria" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 315, + 690, + 424, + 701 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 690, + 424, + 701 + ], + "spans": [ + { + "bbox": [ + 315, + 690, + 424, + 701 + ], + "type": "text", + "content": "2 Peking University, China" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 315, + 702, + 449, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 702, + 449, + 713 + ], + "spans": [ + { + "bbox": [ + 315, + 702, + 449, + 713 + ], + "type": "text", + "content": "3 Computer Vision Center, Spain" + } + ] + } + ], + "index": 32 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 202, + 96 + ], + "type": "list", + "angle": 0, + "index": 2, + "blocks": [ + { + "bbox": [ + 55, + 72, + 174, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 174, + 83 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 174, + 83 + ], + "type": "text", + "content": "4 Zhejiang University, China" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 83, + 202, + 96 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 83, + 202, + 96 + ], + "spans": [ + { + "bbox": [ + 55, + 83, + 202, + 96 + ], + "type": "text", + "content": "5 University of Würzburg, Germany" + } + ] + } + ], + "index": 1 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 56, + 115, + 104, + 126 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 115, + 104, + 126 + ], + "spans": [ + { + "bbox": [ + 56, + 115, + 104, + 126 + ], + "type": "text", + "content": "IVISLAB" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 133, + 277, + 
145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 133, + 277, + 145 + ], + "spans": [ + { + "bbox": [ + 55, + 133, + 277, + 145 + ], + "type": "text", + "content": "Title: Triple Event-stream Image Deblurring Network" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 146, + 99, + 156 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 146, + 99, + 156 + ], + "spans": [ + { + "bbox": [ + 56, + 146, + 99, + 156 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 156, + 294, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 156, + 294, + 192 + ], + "spans": [ + { + "bbox": [ + 55, + 156, + 294, + 192 + ], + "type": "text", + "content": "Qinglin Liu" + }, + { + "bbox": [ + 55, + 156, + 294, + 192 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 156, + 294, + 192 + ], + "type": "text", + "content": " (qlliu@hit.edu.cn), Wei Yu" + }, + { + "bbox": [ + 55, + 156, + 294, + 192 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 55, + 156, + 294, + 192 + ], + "type": "text", + "content": ", Xiaoqian Lv" + }, + { + "bbox": [ + 55, + 156, + 294, + 192 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 156, + 294, + 192 + ], + "type": "text", + "content": ", Lu Yang" + }, + { + "bbox": [ + 55, + 156, + 294, + 192 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 55, + 156, + 294, + 192 + ], + "type": "text", + "content": ", Shuigen Wang" + }, + { + "bbox": [ + 55, + 156, + 294, + 192 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 55, + 156, + 294, + 192 + ], + "type": "text", + "content": ", Shengping Zhang" + }, + { + "bbox": [ + 55, + 156, + 294, + 192 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 156, + 294, + 192 + ], + "type": "text", + "content": ", Xiangyang Ji" + }, + { + "bbox": [ + 55, + 156, + 294, + 192 + ], + "type": "inline_equation", + "content": "^{2}" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 194, + 107, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 194, + 107, + 205 + ], + "spans": [ + { + "bbox": [ + 56, + 194, + 107, + 205 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 205, + 220, + 241 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 56, + 205, + 220, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 205, + 220, + 217 + ], + "spans": [ + { + "bbox": [ + 56, + 205, + 220, + 217 + ], + "type": "text", + "content": "1 Harbin Institute of Technology, Weihai" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 217, + 147, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 217, + 147, + 229 + ], + "spans": [ + { + "bbox": [ + 56, + 217, + 147, + 229 + ], + "type": "text", + "content": "2 Tsinghua University" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 229, + 184, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 229, + 184, + 241 + ], + "spans": [ + { + "bbox": [ + 56, + 229, + 184, + 241 + ], + "type": "text", + "content": "3 Raytron Technology Co., Ltd."
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 56, + 261, + 132, + 272 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 261, + 132, + 272 + ], + "spans": [ + { + "bbox": [ + 56, + 261, + 132, + 272 + ], + "type": "text", + "content": "MiVideoDeblur" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 278, + 294, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 278, + 294, + 301 + ], + "spans": [ + { + "bbox": [ + 55, + 278, + 294, + 301 + ], + "type": "text", + "content": "Title: Event-Based Image Deblurring from Team MiVideoDeblur" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 303, + 100, + 313 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 303, + 100, + 313 + ], + "spans": [ + { + "bbox": [ + 56, + 303, + 100, + 313 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 55, + 314, + 294, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 314, + 294, + 338 + ], + "spans": [ + { + "bbox": [ + 55, + 314, + 294, + 338 + ], + "type": "text", + "content": "Long Bao1 (baolong@xiaomi.com), Yuqiang Yang1, Jinao Song1, Ziyi Wang1, Shuang Wen1, Heng Sun1" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 56, + 339, + 107, + 350 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 339, + 107, + 350 + ], + "spans": [ + { + "bbox": [ + 56, + 339, + 107, + 350 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 57, + 350, + 141, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 350, + 141, + 361 + ], + "spans": [ + { + "bbox": [ + 57, + 350, + 141, + 361 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 57, + 350, + 141, + 361 + ], + "type": "text", + "content": " Xiaomi Inc., China" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 56, + 381, + 122, + 393 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 381, + 122, + 393 + ], + "spans": [ + { + "bbox": [ + 56, + 381, + 122, + 393 + ], + "type": "text", + "content": "404NotFound" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 55, + 399, + 294, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 399, + 294, + 423 + ], + "spans": [ + { + "bbox": [ + 55, + 399, + 294, + 423 + ], + "type": "text", + "content": "Title: Event-Conditioned Dual-Modal Fusion for Motion Deblurring" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 56, + 425, + 99, + 434 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 425, + 99, + 434 + ], + "spans": [ + { + "bbox": [ + 56, + 425, + 99, + 434 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 55, + 435, + 294, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 435, + 294, + 482 + ], + "spans": [ + { + "bbox": [ + 55, + 435, + 294, + 482 + ], + "type": "text", + "content": "Kean Liu1 (rickyliu@mail.ustc.edu.cn), Mingchen Zhong1, Senyan Xu1, Zhijing Sun1, Jiaying Zhu1, Chengjie Ge1, Xingbo Wang1, Yidi Liu1, Xin Lu1, Xueyang Fu1, Zheng-Jun Zha1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 56, + 483, + 107, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 483, + 107, + 495 + ], + "spans": [ + { + "bbox": [ + 56, + 483, + 107, + 495 + ], + "type": "text", + "content": 
"Affiliations:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 57, + 495, + 255, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 495, + 255, + 507 + ], + "spans": [ + { + "bbox": [ + 57, + 495, + 255, + 507 + ], + "type": "text", + "content": "1 University of Science and Technology of China" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 56, + 527, + 118, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 527, + 118, + 540 + ], + "spans": [ + { + "bbox": [ + 56, + 527, + 118, + 540 + ], + "type": "text", + "content": "Give_it_a_try" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 55, + 544, + 294, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 544, + 294, + 568 + ], + "spans": [ + { + "bbox": [ + 55, + 544, + 294, + 568 + ], + "type": "text", + "content": "Title: Event-Based Image Deblurring from Team Give_it_a_try" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 56, + 569, + 99, + 579 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 569, + 99, + 579 + ], + "spans": [ + { + "bbox": [ + 56, + 569, + 99, + 579 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 55, + 580, + 294, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 580, + 294, + 604 + ], + "spans": [ + { + "bbox": [ + 55, + 580, + 294, + 604 + ], + "type": "text", + "content": "Dawei Fan" + }, + { + "bbox": [ + 55, + 580, + 294, + 604 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 580, + 294, + 604 + ], + "type": "text", + "content": " (dawei.fan@partner.samsung.com), Dafeng Zhang" + }, + { + "bbox": [ + 55, + 580, + 294, + 604 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 580, + 294, + 604 + ], + "type": "text", + "content": ", Yong Yang" + }, + { + "bbox": [ + 55, + 580, + 294, + 604 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 56, + 605, + 107, + 616 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 605, + 107, + 616 + ], + "spans": [ + { + "bbox": [ + 56, + 605, + 107, + 616 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 57, + 616, + 240, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 616, + 240, + 628 + ], + "spans": [ + { + "bbox": [ + 57, + 616, + 240, + 628 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 57, + 616, + 240, + 628 + ], + "type": "text", + "content": " Samsung Research China- Beijing (SRC-B)" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 56, + 647, + 108, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 647, + 108, + 658 + ], + "spans": [ + { + "bbox": [ + 56, + 647, + 108, + 658 + ], + "type": "text", + "content": "BUPTMM" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 55, + 665, + 293, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 665, + 293, + 688 + ], + "spans": [ + { + "bbox": [ + 55, + 665, + 293, + 688 + ], + "type": "text", + "content": "Title: Weighted Fusion for Event-based Image Deblurring Members:" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 55, + 689, + 294, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 294, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 294, 
+ 714 + ], + "type": "text", + "content": "Siru Zhang" + }, + { + "bbox": [ + 55, + 689, + 294, + 714 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 689, + 294, + 714 + ], + "type": "text", + "content": " (zhangsr@bupt.edu.cn), Qinghua Yang" + }, + { + "bbox": [ + 55, + 689, + 294, + 714 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 689, + 294, + 714 + ], + "type": "text", + "content": ", Hao Kang" + }, + { + "bbox": [ + 55, + 689, + 294, + 714 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 689, + 294, + 714 + ], + "type": "text", + "content": ", Huiyuan Fu" + }, + { + "bbox": [ + 55, + 689, + 294, + 714 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 55, + 689, + 294, + 714 + ], + "type": "text", + "content": ", Heng Zhang" + }, + { + "bbox": [ + 55, + 689, + 294, + 714 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 55, + 689, + 294, + 714 + ], + "type": "text", + "content": ", Hongyuan Yu" + }, + { + "bbox": [ + 55, + 689, + 294, + 714 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 55, + 689, + 294, + 714 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 313, + 72, + 381, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 381, + 84 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 381, + 84 + ], + "type": "text", + "content": "Zhijuan Huang" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 314, + 85, + 365, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 85, + 365, + 95 + ], + "spans": [ + { + "bbox": [ + 314, + 85, + 365, + 95 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 314, + 96, + 553, + 131 + ], + "type": "list", + "angle": 0, + "index": 37, + "blocks": [ + { + "bbox": [ + 314, + 96, + 553, + 120 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 96, + 553, + 120 + ], + "spans": [ + { + "bbox": [ + 314, + 96, + 553, + 120 + ], + "type": "text", + "content": "1 Beijing University of Posts and Telecommunications, Beijing, China." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 315, + 120, + 403, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 120, + 403, + 131 + ], + "spans": [ + { + "bbox": [ + 315, + 120, + 403, + 131 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 315, + 120, + 403, + 131 + ], + "type": "text", + "content": " Xiaomi Inc., China." 
+ } + ] + } + ], + "index": 36 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 154, + 340, + 165 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 154, + 340, + 165 + ], + "spans": [ + { + "bbox": [ + 315, + 154, + 340, + 165 + ], + "type": "text", + "content": "WEI" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 313, + 172, + 553, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 172, + 553, + 196 + ], + "spans": [ + { + "bbox": [ + 313, + 172, + 553, + 196 + ], + "type": "text", + "content": "Title: Bi-directional Gathered Recurrent Network for Event-based Image Deblurring" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 315, + 198, + 358, + 207 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 198, + 358, + 207 + ], + "spans": [ + { + "bbox": [ + 315, + 198, + 358, + 207 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 314, + 208, + 490, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 208, + 490, + 220 + ], + "spans": [ + { + "bbox": [ + 314, + 208, + 490, + 220 + ], + "type": "text", + "content": "Shuoyan Wei1 (shuoyan.wei@bjtu.edu.cn)," + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 315, + 220, + 418, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 220, + 418, + 232 + ], + "spans": [ + { + "bbox": [ + 315, + 220, + 418, + 232 + ], + "type": "text", + "content": "Feng Li" + }, + { + "bbox": [ + 315, + 220, + 418, + 232 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 315, + 220, + 418, + 232 + ], + "type": "text", + "content": ", Runmin Cong" + }, + { + "bbox": [ + 315, + 220, + 418, + 232 + ], + "type": "inline_equation", + "content": "^{3}" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 315, + 233, + 365, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 233, + 365, + 243 + ], + "spans": [ + { + "bbox": [ + 315, + 233, + 365, + 243 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 315, + 244, + 553, + 316 + ], + "type": "list", + "angle": 0, + "index": 47, + "blocks": [ + { + "bbox": [ + 315, + 244, + 553, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 244, + 553, + 268 + ], + "spans": [ + { + "bbox": [ + 315, + 244, + 553, + 268 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 244, + 553, + 268 + ], + "type": "text", + "content": " Institute of Information Science, Beijing Jiaotong University" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 315, + 268, + 553, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 268, + 553, + 293 + ], + "spans": [ + { + "bbox": [ + 315, + 268, + 553, + 293 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 315, + 268, + 553, + 293 + ], + "type": "text", + "content": " School of Computer Science and Engineering, Hefei University of Technology" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 315, + 293, + 553, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 293, + 553, + 316 + ], + "spans": [ + { + "bbox": [ + 315, + 293, + 553, + 316 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 315, + 293, + 553, + 316 + ], + "type": "text", + "content": " School of Control Science and Engineering, Shandong University" + } 
+ ] + } + ], + "index": 46 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 338, + 370, + 350 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 338, + 370, + 350 + ], + "spans": [ + { + "bbox": [ + 315, + 338, + 370, + 350 + ], + "type": "text", + "content": "DVS-WHU" + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 313, + 357, + 553, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 357, + 553, + 380 + ], + "spans": [ + { + "bbox": [ + 313, + 357, + 553, + 380 + ], + "type": "text", + "content": "Title: Dual Channel Cross-modal Mamba for Event-based Motion Deblurring" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 315, + 381, + 358, + 391 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 381, + 358, + 391 + ], + "spans": [ + { + "bbox": [ + 315, + 381, + 358, + 391 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 50 + }, + { + "bbox": [ + 315, + 393, + 462, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 393, + 462, + 404 + ], + "spans": [ + { + "bbox": [ + 315, + 393, + 462, + 404 + ], + "type": "text", + "content": "Weiqi Luo1 (wikyluo@whu.edu.cn)," + } + ] + } + ], + "index": 51 + }, + { + "bbox": [ + 315, + 404, + 553, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 404, + 553, + 428 + ], + "spans": [ + { + "bbox": [ + 315, + 404, + 553, + 428 + ], + "type": "text", + "content": "Mingyun Lin1, Chenxu Jiang1, Hongyi Liu1, Lei Yu2 \nAffiliations:" + } + ] + } + ], + "index": 52 + }, + { + "bbox": [ + 315, + 429, + 533, + 453 + ], + "type": "list", + "angle": 0, + "index": 55, + "blocks": [ + { + "bbox": [ + 315, + 429, + 533, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 429, + 533, + 440 + ], + "spans": [ + { + "bbox": [ + 315, + 429, + 533, + 440 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 315, + 429, + 533, + 440 + ], + "type": "text", + "content": " School of Electronic Information, Wuhan University" + } + ] + } + ], + "index": 53 + }, + { + "bbox": [ + 315, + 441, + 528, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 441, + 528, + 453 + ], + "spans": [ + { + "bbox": [ + 315, + 441, + 528, + 453 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 315, + 441, + 528, + 453 + ], + "type": "text", + "content": " School of Artificial Intelligence, Wuhan University" + } + ] + } + ], + "index": 54 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 475, + 372, + 486 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 475, + 372, + 486 + ], + "spans": [ + { + "bbox": [ + 315, + 475, + 372, + 486 + ], + "type": "text", + "content": "PixelRevive" + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 313, + 493, + 553, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 493, + 553, + 516 + ], + "spans": [ + { + "bbox": [ + 313, + 493, + 553, + 516 + ], + "type": "text", + "content": "Title: Event-Based Image Deblurring from Team PixelRe-vive" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 315, + 517, + 358, + 527 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 517, + 358, + 527 + ], + "spans": [ + { + "bbox": [ + 315, + 517, + 358, + 527 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 314, + 529, + 553, + 552 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 529, + 553, + 552 + ], + "spans": [ + { + "bbox": [ + 314, + 529, + 553, + 552 + ], + "type": "text", + "content": "Weilun Li" + }, + { + "bbox": [ + 314, + 529, + 553, + 552 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 314, + 529, + 553, + 552 + ], + "type": "text", + "content": " (xyj961011@163.com), Jiajun Zhai" + }, + { + "bbox": [ + 314, + 529, + 553, + 552 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 314, + 529, + 553, + 552 + ], + "type": "text", + "content": ", Tingting Lin" + }, + { + "bbox": [ + 314, + 529, + 553, + 552 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 315, + 553, + 365, + 564 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 553, + 365, + 564 + ], + "spans": [ + { + "bbox": [ + 315, + 553, + 365, + 564 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 315, + 565, + 553, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 565, + 553, + 589 + ], + "spans": [ + { + "bbox": [ + 315, + 565, + 553, + 589 + ], + "type": "text", + "content": "1 College of Optical Science and Engineering, Zhejiang University" + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 315, + 611, + 342, + 622 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 611, + 342, + 622 + ], + "spans": [ + { + "bbox": [ + 315, + 611, + 342, + 622 + ], + "type": "text", + "content": "CHD" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 314, + 629, + 491, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 629, + 491, + 641 + ], + "spans": [ + { + "bbox": [ + 314, + 629, + 491, + 641 + ], + "type": "text", + "content": "Title: Event-Image Deblurformer Network" + } + ] + } + ], + "index": 63 + }, + { + "bbox": [ + 315, + 643, + 358, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 643, + 358, + 651 + ], + "spans": [ + { + "bbox": [ + 315, + 643, + 358, + 651 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 64 + }, + { + "bbox": [ + 314, + 653, + 553, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 653, + 553, + 677 + ], + "spans": [ + { + "bbox": [ + 314, + 653, + 553, + 677 + ], + "type": "text", + "content": "Shuang Ma1 (3125508679@qq.com), Sai Zhou2, Zhanwen Liu3, Yang Wang4" + } + ] + } + ], + "index": 65 + }, + { + "bbox": [ + 315, + 678, + 365, + 689 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 678, + 365, + 689 + ], + "spans": [ + { + "bbox": [ + 315, + 678, + 365, + 689 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 66 + }, + { + "bbox": [ + 315, + 689, + 463, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 689, + 463, + 701 + ], + "spans": [ + { + "bbox": [ + 315, + 689, + 463, + 701 + ], + "type": "text", + "content": "1 Chang'an University, Xi'an, China" + } + ] + } + ], + "index": 67 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 83, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 83, + 83 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 83, + 83 + ], + "type": "text", + "content": "SMU" + } + ] + } + ], + "index": 0 + }, + { + 
"bbox": [ + 55, + 91, + 296, + 115 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 91, + 296, + 115 + ], + "spans": [ + { + "bbox": [ + 55, + 91, + 296, + 115 + ], + "type": "text", + "content": "Title: Explicit Feature Tracking and Iterative Refinement for Enhancing Event-based Image Deblurring" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 116, + 100, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 116, + 100, + 125 + ], + "spans": [ + { + "bbox": [ + 56, + 116, + 100, + 125 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 126, + 294, + 151 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 126, + 294, + 151 + ], + "spans": [ + { + "bbox": [ + 56, + 126, + 294, + 151 + ], + "type": "text", + "content": "Eiffel Chong1, Nuwan Bandara1, Thivya Kandappu1 (thivyak@smu.edu.sg), Archan Misra1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 152, + 107, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 152, + 107, + 162 + ], + "spans": [ + { + "bbox": [ + 56, + 152, + 107, + 162 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 57, + 163, + 205, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 163, + 205, + 175 + ], + "spans": [ + { + "bbox": [ + 57, + 163, + 205, + 175 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 57, + 163, + 205, + 175 + ], + "type": "text", + "content": " Singapore Management University" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 200, + 97, + 213 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 200, + 97, + 213 + ], + "spans": [ + { + "bbox": [ + 56, + 200, + 97, + 213 + ], + "type": "text", + "content": "JNU620" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 219, + 293, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 219, + 293, + 232 + ], + "spans": [ + { + "bbox": [ + 55, + 219, + 293, + 232 + ], + "type": "text", + "content": "Title: Event-Based Image Deblurring from Team JNU620" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 232, + 100, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 232, + 100, + 243 + ], + "spans": [ + { + "bbox": [ + 56, + 232, + 100, + 243 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 243, + 217, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 243, + 217, + 255 + ], + "spans": [ + { + "bbox": [ + 56, + 243, + 217, + 255 + ], + "type": "text", + "content": "Yihang Chen" + }, + { + "bbox": [ + 56, + 243, + 217, + 255 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 243, + 217, + 255 + ], + "type": "text", + "content": " (Ehang@stu.jnu.edu.cn)," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 255, + 294, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 255, + 294, + 279 + ], + "spans": [ + { + "bbox": [ + 56, + 255, + 294, + 279 + ], + "type": "text", + "content": "Zhan Li" + }, + { + "bbox": [ + 56, + 255, + 294, + 279 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 255, + 294, + 279 + ], + "type": "text", + "content": ", Weijun Yuan" + }, + { + "bbox": [ + 56, + 255, + 294, + 279 + ], + "type": "inline_equation", + 
"content": "^{1}" + }, + { + "bbox": [ + 56, + 255, + 294, + 279 + ], + "type": "text", + "content": ", Wenzhuo Wang" + }, + { + "bbox": [ + 56, + 255, + 294, + 279 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 255, + 294, + 279 + ], + "type": "text", + "content": ", Boyang Yao" + }, + { + "bbox": [ + 56, + 255, + 294, + 279 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 255, + 294, + 279 + ], + "type": "text", + "content": ", Zhanglu Chen" + }, + { + "bbox": [ + 56, + 255, + 294, + 279 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 280, + 107, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 280, + 107, + 291 + ], + "spans": [ + { + "bbox": [ + 56, + 280, + 107, + 291 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 291, + 294, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 291, + 294, + 316 + ], + "spans": [ + { + "bbox": [ + 56, + 291, + 294, + 316 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 291, + 294, + 316 + ], + "type": "text", + "content": " Department of Computer Science, Jinan University, Guangzhou, China" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 341, + 84, + 352 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 341, + 84, + 352 + ], + "spans": [ + { + "bbox": [ + 56, + 341, + 84, + 352 + ], + "type": "text", + "content": "colab" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 360, + 295, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 360, + 295, + 384 + ], + "spans": [ + { + "bbox": [ + 55, + 360, + 295, + 384 + ], + "type": "text", + "content": "Title: Dynamic Enhanced Fusion Network for Event-based Image Deblurring" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 56, + 385, + 100, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 385, + 100, + 395 + ], + "spans": [ + { + "bbox": [ + 56, + 385, + 100, + 395 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 56, + 396, + 294, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 396, + 294, + 420 + ], + "spans": [ + { + "bbox": [ + 56, + 396, + 294, + 420 + ], + "type": "text", + "content": "Yijing Sun" + }, + { + "bbox": [ + 56, + 396, + 294, + 420 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 396, + 294, + 420 + ], + "type": "text", + "content": " (syj3508852939@163.com), Tianjiao Wan" + }, + { + "bbox": [ + 56, + 396, + 294, + 420 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 396, + 294, + 420 + ], + "type": "text", + "content": ", Zijian Gao" + }, + { + "bbox": [ + 56, + 396, + 294, + 420 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 396, + 294, + 420 + ], + "type": "text", + "content": ", Qisheng Xu" + }, + { + "bbox": [ + 56, + 396, + 294, + 420 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 396, + 294, + 420 + ], + "type": "text", + "content": ", Kele Xu" + }, + { + "bbox": [ + 56, + 396, + 294, + 420 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 56, + 421, + 107, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ 
+ { + "bbox": [ + 56, + 421, + 107, + 431 + ], + "spans": [ + { + "bbox": [ + 56, + 421, + 107, + 431 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 56, + 432, + 239, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 432, + 239, + 444 + ], + "spans": [ + { + "bbox": [ + 56, + 432, + 239, + 444 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 432, + 239, + 444 + ], + "type": "text", + "content": " National University of Defense Technology" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 56, + 469, + 90, + 481 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 469, + 90, + 481 + ], + "spans": [ + { + "bbox": [ + 56, + 469, + 90, + 481 + ], + "type": "text", + "content": "CMSL" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 55, + 488, + 295, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 488, + 295, + 511 + ], + "spans": [ + { + "bbox": [ + 55, + 488, + 295, + 511 + ], + "type": "text", + "content": "Title: Cascade Event Deblurring Model With Event Edge Loss" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 56, + 514, + 100, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 514, + 100, + 523 + ], + "spans": [ + { + "bbox": [ + 56, + 514, + 100, + 523 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 56, + 525, + 294, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 525, + 294, + 548 + ], + "spans": [ + { + "bbox": [ + 56, + 525, + 294, + 548 + ], + "type": "text", + "content": "Yukun Zhang" + }, + { + "bbox": [ + 56, + 525, + 294, + 548 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 525, + 294, + 548 + ], + "type": "text", + "content": " (zhangyukun@cmhi.chinamobile.com), Yu He" + }, + { + "bbox": [ + 56, + 525, + 294, + 548 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 525, + 294, + 548 + ], + "type": "text", + "content": ", Xiaoyan Xie" + }, + { + "bbox": [ + 56, + 525, + 294, + 548 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 56, + 525, + 294, + 548 + ], + "type": "text", + "content": ", Tao Fu" + }, + { + "bbox": [ + 56, + 525, + 294, + 548 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 56, + 549, + 107, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 549, + 107, + 559 + ], + "spans": [ + { + "bbox": [ + 56, + 549, + 107, + 559 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 56, + 560, + 294, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 560, + 294, + 585 + ], + "spans": [ + { + "bbox": [ + 56, + 560, + 294, + 585 + ], + "type": "text", + "content": "1 China Mobile (Hangzhou) Information Technology Co., Ltd, Hangzhou, China" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 56, + 609, + 90, + 621 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 609, + 90, + 621 + ], + "spans": [ + { + "bbox": [ + 56, + 609, + 90, + 621 + ], + "type": "text", + "content": "KUnet" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 56, + 629, + 107, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 629, + 107, + 640 + ], + "spans": [ + { + "bbox": [ + 56, + 
629, + 107, + 640 + ], + "type": "text", + "content": "Title: KUnet" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 56, + 642, + 100, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 642, + 100, + 652 + ], + "spans": [ + { + "bbox": [ + 56, + 642, + 100, + 652 + ], + "type": "text", + "content": "Members:" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 56, + 653, + 295, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 653, + 295, + 677 + ], + "spans": [ + { + "bbox": [ + 56, + 653, + 295, + 677 + ], + "type": "text", + "content": "Yashu Gautamkumar Patel1 (ypatel37@asu.edu), Vihar Ramesh Jain1, Divesh Basina1" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 56, + 678, + 107, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 678, + 107, + 689 + ], + "spans": [ + { + "bbox": [ + 56, + 678, + 107, + 689 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 56, + 689, + 164, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 689, + 164, + 702 + ], + "spans": [ + { + "bbox": [ + 56, + 689, + 164, + 702 + ], + "type": "text", + "content": "1 Arizona State University" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 314, + 72, + 360, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 360, + 84 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 360, + 84 + ], + "type": "text", + "content": "Group10" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 313, + 89, + 555, + 112 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 89, + 555, + 112 + ], + "spans": [ + { + "bbox": [ + 313, + 89, + 555, + 112 + ], + "type": "text", + "content": "Title: Event-Based Image Deblurring from Team Group10 \nMembers:" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 313, + 113, + 554, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 113, + 554, + 160 + ], + "spans": [ + { + "bbox": [ + 313, + 113, + 554, + 160 + ], + "type": "text", + "content": "Rishik Ashili" + }, + { + "bbox": [ + 313, + 113, + 554, + 160 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 313, + 113, + 554, + 160 + ], + "type": "text", + "content": " (rishik67_soe@jnu.ac.in), Manish Kumar Manjhi" + }, + { + "bbox": [ + 313, + 113, + 554, + 160 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 313, + 113, + 554, + 160 + ], + "type": "text", + "content": ", Sourav Kumar" + }, + { + "bbox": [ + 313, + 113, + 554, + 160 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 313, + 113, + 554, + 160 + ], + "type": "text", + "content": ", Prinon Benny" + }, + { + "bbox": [ + 313, + 113, + 554, + 160 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 313, + 113, + 554, + 160 + ], + "type": "text", + "content": ", Himanshu Ghunawat" + }, + { + "bbox": [ + 313, + 113, + 554, + 160 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 313, + 113, + 554, + 160 + ], + "type": "text", + "content": ", B Sri Sairam Gautam" + }, + { + "bbox": [ + 313, + 113, + 554, + 160 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 313, + 113, + 554, + 160 + ], + "type": "text", + "content": ", Anett Varghese" + }, + { + "bbox": [ + 313, + 113, + 554, + 160 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 313,
+ 113, + 554, + 160 + ], + "type": "text", + "content": ", Abhishek Yadav" + }, + { + "bbox": [ + 313, + 113, + 554, + 160 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 314, + 162, + 366, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 162, + 366, + 173 + ], + "spans": [ + { + "bbox": [ + 314, + 162, + 366, + 173 + ], + "type": "text", + "content": "Affiliations:" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 315, + 173, + 511, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 173, + 511, + 185 + ], + "spans": [ + { + "bbox": [ + 315, + 173, + 511, + 185 + ], + "type": "text", + "content": "1 Jawaharlal Nehru University, New Delhi, India" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 315, + 209, + 373, + 221 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 209, + 373, + 221 + ], + "spans": [ + { + "bbox": [ + 315, + 209, + 373, + 221 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 315, + 228, + 555, + 714 + ], + "type": "list", + "angle": 0, + "index": 47, + "blocks": [ + { + "bbox": [ + 319, + 228, + 555, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 228, + 555, + 274 + ], + "spans": [ + { + "bbox": [ + 319, + 228, + 555, + 274 + ], + "type": "text", + "content": "[1] Inigo Alonso and Ana C Murillo. Ev-segnet: Semantic segmentation for event-based cameras. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops, pages 0-0, 2019. 1" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 320, + 274, + 554, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 274, + 554, + 319 + ], + "spans": [ + { + "bbox": [ + 320, + 274, + 554, + 319 + ], + "type": "text", + "content": "[2] Jiaan Chen, Hao Shi, Yaozu Ye, Kailun Yang, Lei Sun, and Kaiwei Wang. Efficient human pose estimation via 3d event point cloud. In 2022 International Conference on 3D Vision (3DV), pages 1-10. IEEE, 2022. 1" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 320, + 319, + 554, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 319, + 554, + 361 + ], + "spans": [ + { + "bbox": [ + 320, + 319, + 554, + 361 + ], + "type": "text", + "content": "[3] Liangyu Chen, Xiaojie Chu, Xiangyu Zhang, and Jian Sun. Simple baselines for image restoration. In European Conference on Computer Vision, pages 17-33. Springer, 2022. 5" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 320, + 365, + 554, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 365, + 554, + 430 + ], + "spans": [ + { + "bbox": [ + 320, + 365, + 554, + 430 + ], + "type": "text", + "content": "[4] Zheng Chen, Kai Liu, Jue Gong, Jingkai Wang, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. NTIRE 2025 challenge on image super-resolution " + }, + { + "bbox": [ + 320, + 365, + 554, + 430 + ], + "type": "inline_equation", + "content": "(\\times 4)" + }, + { + "bbox": [ + 320, + 365, + 554, + 430 + ], + "type": "text", + "content": ": Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 320, + 432, + 554, + 498 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 432, + 554, + 498 + ], + "spans": [ + { + "bbox": [ + 320, + 432, + 554, + 498 + ], + "type": "text", + "content": "[5] Zheng Chen, Jingkai Wang, Kai Liu, Jue Gong, Lei Sun, Zongwei Wu, Radu Timofte, Yulun Zhang, et al. NTIRE 2025 challenge on real-world face restoration: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 320, + 499, + 554, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 499, + 554, + 543 + ], + "spans": [ + { + "bbox": [ + 320, + 499, + 554, + 543 + ], + "type": "text", + "content": "[6] Zhangyi Cheng, Xiang Zhang, Lei Yu, Jianzhuang Liu, Wen Yang, and Gui-Song Xia. Recovering continuous scene dynamics from a single blurry image with events. arXiv preprint arXiv:2304.02695, 2023. 8" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 320, + 544, + 554, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 544, + 554, + 588 + ], + "spans": [ + { + "bbox": [ + 320, + 544, + 554, + 588 + ], + "type": "text", + "content": "[7] Marcos Conde, Radu Timofte, et al. NTIRE 2025 challenge on raw image restoration and super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 320, + 590, + 554, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 590, + 554, + 643 + ], + "spans": [ + { + "bbox": [ + 320, + 590, + 554, + 643 + ], + "type": "text", + "content": "[8] Marcos Conde, Radu Timofte, et al. Raw image reconstruction from RGB on smartphones. NTIRE 2025 challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 320, + 646, + 554, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 646, + 554, + 689 + ], + "spans": [ + { + "bbox": [ + 320, + 646, + 554, + 689 + ], + "type": "text", + "content": "[9] Yuning Cui, Yi Tao, Zhenshan Bing, Wenqi Ren, Xinwei Gao, Xiaochun Cao, Kai Huang, and Alois Knoll. Selective frequency network for image restoration. In International Conference on Learning Representations, 2023. 
10" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 315, + 691, + 554, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 691, + 554, + 714 + ], + "spans": [ + { + "bbox": [ + 315, + 691, + 554, + 714 + ], + "type": "text", + "content": "[10] Egor Ershov, Sergey Korchagin, Alexei Khalin, Artyom Panshin, Arseniy Terekhin, Ekaterina Zaychenkova, Georgiy" + } + ] + } + ], + "index": 46 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 295, + 714 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 76, + 72, + 295, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 72, + 295, + 139 + ], + "spans": [ + { + "bbox": [ + 76, + 72, + 295, + 139 + ], + "type": "text", + "content": "Lobarev, Vsevolod Plokhotnyuk, Denis Abramov, Elisey Zhdanov, Sofia Dorogova, Yasin Mamedov, Nikola Banic, Georgii Perevozchikov, Radu Timofte, et al. NTIRE 2025 challenge on night photography rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 140, + 295, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 140, + 295, + 205 + ], + "spans": [ + { + "bbox": [ + 56, + 140, + 295, + 205 + ], + "type": "text", + "content": "[11] Yuqian Fu, Xingyu Qiu, Bin Ren Yanwei Fu, Radu Timofte, Nicu Sebe, Ming-Hsuan Yang, Luc Van Gool, et al. NTIRE 2025 challenge on cross-domain few-shot object detection: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 208, + 295, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 208, + 295, + 262 + ], + "spans": [ + { + "bbox": [ + 56, + 208, + 295, + 262 + ], + "type": "text", + "content": "[12] Guillermo Gallego, Tobi Delbruck, Garrick Orchard, Chiara Bartolozzi, Brian Taba, Andrea Censi, Stefan Leutenegger, Andrew Davison, Jörg Conradt, Kostas Daniilidis, and Davide Scaramuzza. Event-based vision: A survey. IEEE Trans. Pattern Anal. Mach. Intell., 44(1):154-180, 2022. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 263, + 295, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 263, + 295, + 328 + ], + "spans": [ + { + "bbox": [ + 56, + 263, + 295, + 328 + ], + "type": "text", + "content": "[13] Shuhao Han, Haotian Fan, Fangyuan Kong, Wenjie Liao, Chunle Guo, Chongyi Li, Radu Timofte, et al. NTIRE 2025 challenge on text to image generation model quality assessment. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 331, + 295, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 331, + 295, + 374 + ], + "spans": [ + { + "bbox": [ + 56, + 331, + 295, + 374 + ], + "type": "text", + "content": "[14] Xuanhua He, Ke Cao, Jie Zhang, Keyu Yan, Yingying Wang, Rui Li, Chengjun Xie, Danfeng Hong, and Man Zhou. Panmamba: Effective pan-sharpening with state space model. Information Fusion, 115:102779, 2025. 
7" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 376, + 295, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 376, + 295, + 420 + ], + "spans": [ + { + "bbox": [ + 56, + 376, + 295, + 420 + ], + "type": "text", + "content": "[15] Yuhuang Hu, Shih-Chii Liu, and Tobi Delbruck. v2e: From video frames to realistic dvs events. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1312-1321, 2021. 8" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 422, + 295, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 422, + 295, + 487 + ], + "spans": [ + { + "bbox": [ + 56, + 422, + 295, + 487 + ], + "type": "text", + "content": "[16] Varun Jain, Zongwei Wu, Quan Zou, Louis Florentin, Henrik Turbell, Sandeep Siddhartha, Radu Timofte, et al. NTIRE 2025 challenge on video quality enhancement for video conferencing: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 488, + 295, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 488, + 295, + 521 + ], + "spans": [ + { + "bbox": [ + 56, + 488, + 295, + 521 + ], + "type": "text", + "content": "[17] J Kim, D K Ghosh, and Y J Jung. Event-based video deblurring based on image and event feature fusion. Expert Systems with Applications, 223:119917, 2023. 10" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 522, + 295, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 522, + 295, + 577 + ], + "spans": [ + { + "bbox": [ + 56, + 522, + 295, + 577 + ], + "type": "text", + "content": "[18] Taewoo Kim, Hoonhee Cho, and Kuk-Jin Yoon. Frequency-aware event-based video deblurring for real-world motion blur. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24966-24976, 2024. 9, 10" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 578, + 295, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 578, + 295, + 644 + ], + "spans": [ + { + "bbox": [ + 56, + 578, + 295, + 644 + ], + "type": "text", + "content": "[19] Sangmin Lee, Eunpil Park, Angel Canelo, Hyunhee Park, Youngjo Kim, Hyungju Chun, Xin Jin, Chongyi Li, Chun-Le Guo, Radu Timofte, et al. NTIRE 2025 challenge on efficient burst hdr and restoration: Datasets, methods, and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 647, + 295, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 647, + 295, + 689 + ], + "spans": [ + { + "bbox": [ + 56, + 647, + 295, + 689 + ], + "type": "text", + "content": "[20] Huan Li, Hailong Shi, and Xingyu Gao. A coarse-to-fine fusion network for event-based image deblurring. In Proceedings of the International Joint Conference on Artificial Intelligence, pages 974-982, 2024. 
9, 10" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 691, + 295, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 691, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 56, + 691, + 295, + 714 + ], + "type": "text", + "content": "[21] Xin Li, Yeying Jin, Xin Jin, Zongwei Wu, Bingchen Li, Yufei Wang, Wenhan Yang, Yu Li, Zhibo Chen, Bihan Wen, Robby" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 72, + 553, + 712 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 333, + 72, + 553, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 72, + 553, + 128 + ], + "spans": [ + { + "bbox": [ + 333, + 72, + 553, + 128 + ], + "type": "text", + "content": "Tan, Radu Timofte, et al. NTIRE 2025 challenge on day and night raindrop removal for dual-focused images: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 316, + 129, + 553, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 129, + 553, + 204 + ], + "spans": [ + { + "bbox": [ + 316, + 129, + 553, + 204 + ], + "type": "text", + "content": "[22] Xin Li, Xijun Wang, Bingchen Li, Kun Yuan, Yizhen Shao, Suhang Yao, Ming Sun, Chao Zhou, Radu Timofte, and Zhibo Chen. NTIRE 2025 challenge on short-form UGC video quality assessment and enhancement: Kwaisr dataset and study. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 205, + 553, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 205, + 553, + 281 + ], + "spans": [ + { + "bbox": [ + 316, + 205, + 553, + 281 + ], + "type": "text", + "content": "[23] Xin Li, Kun Yuan, Bingchen Li, Fengbin Guan, Yizhen Shao, Zihao Yu, Xijun Wang, Yiting Lu, Wei Luo, Suhang Yao, Ming Sun, Chao Zhou, Zhibo Chen, Radu Timofte, et al. NTIRE 2025 challenge on short-form UGC video quality assessment and enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 283, + 553, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 283, + 553, + 335 + ], + "spans": [ + { + "bbox": [ + 316, + 283, + 553, + 335 + ], + "type": "text", + "content": "[24] Jingyun Liang, Jiezhang Cao, Guolei Sun, Kai Zhang, Luc Van Gool, and Radu Timofte. Swinir: Image restoration using swin transformer. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 1833-1844, 2021. 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 338, + 553, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 338, + 553, + 403 + ], + "spans": [ + { + "bbox": [ + 316, + 338, + 553, + 403 + ], + "type": "text", + "content": "[25] Jie Liang, Radu Timofte, Qiaosi Yi, Zhengqiang Zhang, Shuaizheng Liu, Lingchen Sun, Rongyuan Wu, Xindong Zhang, Hui Zeng, Lei Zhang, et al. NTIRE 2025 the 2nd restore any image model (RAIM) in the wild challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 404, + 553, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 404, + 553, + 469 + ], + "spans": [ + { + "bbox": [ + 316, + 404, + 553, + 469 + ], + "type": "text", + "content": "[26] Kean Liu, Mingchen Zhong, Senyan Xu, Zhijing Sun, Jiaying Zhu, Chengjie Ge, Xin Lu, Xingbo Wang, Xueyang Fu, and Zheng-Jun Zha. Event-conditioned dual-modal fusion for motion deblurring. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 4" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 471, + 553, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 471, + 553, + 525 + ], + "spans": [ + { + "bbox": [ + 316, + 471, + 553, + 525 + ], + "type": "text", + "content": "[27] Xiaohong Liu, Xiongkuo Min, Qiang Hu, Xiaoyun Zhang, Jie Guo, et al. NTIRE 2025 XGC quality assessment challenge: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 526, + 553, + 591 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 526, + 553, + 591 + ], + "spans": [ + { + "bbox": [ + 316, + 526, + 553, + 591 + ], + "type": "text", + "content": "[28] Xiaoning Liu, Zongwei Wu, Florin-Alexandru Vasluianu, Hailong Yan, Bin Ren, Yulun Zhang, Shuhang Gu, Le Zhang, Ce Zhu, Radu Timofte, et al. NTIRE 2025 challenge on low light image enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 592, + 553, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 592, + 553, + 624 + ], + "spans": [ + { + "bbox": [ + 316, + 592, + 553, + 624 + ], + "type": "text", + "content": "[29] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2019. 7, 10" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 625, + 553, + 658 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 625, + 553, + 658 + ], + "spans": [ + { + "bbox": [ + 316, + 625, + 553, + 658 + ], + "type": "text", + "content": "[30] Xingyu Lu, Lei Sun, Diyang Gu, and Kaiwei Wang. Sge: structured light system based on gray code with an event camera. Optics Express, 32(26):46044-46061, 2024. 1" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 658, + 553, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 658, + 553, + 712 + ], + "spans": [ + { + "bbox": [ + 316, + 658, + 553, + 712 + ], + "type": "text", + "content": "[31] Xintian Mao, Qingli Li, and Yan Wang. Adarevd: Adaptive patch exiting reversible decoder pushes the limit of image deblurring. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 25681-25690, 2024. 
10" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 713 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 294, + 139 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 294, + 139 + ], + "type": "text", + "content": "[32] Nico Messikommer, Stamatos Georgoulis, Daniel Gehrig, Stepan Tulyakov, Julius Erbach, Alfredo Bochicchio, Yuanyou Li, and Davide Scaramuzza. Multi-bracket high dynamic range imaging with event cameras. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 547-557, 2022. 1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 140, + 294, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 140, + 294, + 195 + ], + "spans": [ + { + "bbox": [ + 56, + 140, + 294, + 195 + ], + "type": "text", + "content": "[33] Nico Messikommer, Carter Fang, Mathias Gehrig, and Davide Scaramuzza. Data-driven feature tracking for event cameras. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5642-5651, 2023. 9" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 197, + 294, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 197, + 294, + 239 + ], + "spans": [ + { + "bbox": [ + 56, + 197, + 294, + 239 + ], + "type": "text", + "content": "[34] Manasi Muglikar, Guillermo Gallego, and Davide Scaramuzza. Esl: Event-based structured light. In 2021 International Conference on 3D Vision (3DV), pages 1165-1174. IEEE, 2021. 1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 243, + 294, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 243, + 294, + 308 + ], + "spans": [ + { + "bbox": [ + 56, + 243, + 294, + 308 + ], + "type": "text", + "content": "[35] Seungjun Nah, Sungyong Baik, Seokil Hong, Gyeongsik Moon, Sanghyun Son, Radu Timofte, and Kyoung Mu Lee. Ntire 2019 challenge on video deblurring and superresolution: Dataset and study. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition workshops, pages 1996-2005, 2019. 8" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 309, + 294, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 309, + 294, + 365 + ], + "spans": [ + { + "bbox": [ + 56, + 309, + 294, + 365 + ], + "type": "text", + "content": "[36] Bin Ren, Hang Guo, Lei Sun, Zongwei Wu, Radu Timofte, Yawei Li, et al. The tenth NTIRE 2025 efficient superresolution challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 365, + 294, + 431 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 365, + 294, + 431 + ], + "spans": [ + { + "bbox": [ + 56, + 365, + 294, + 431 + ], + "type": "text", + "content": "[37] Nickolay Safonov, Alexey Bryntsev, Andrey Moskalenko, Dmitry Kulikov, Dmitriy Vatolin, Radu Timofte, et al. NTIRE 2025 challenge on UGC video enhancement: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 434, + 294, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 434, + 294, + 487 + ], + "spans": [ + { + "bbox": [ + 56, + 434, + 294, + 487 + ], + "type": "text", + "content": "[38] Timo Stoffregen, Cedric Scheerlinck, Davide Scaramuzza, Tom Drummond, Nick Barnes, Lindsay Kleeman, and Robert Mahony. Reducing the sim-to-real gap for event cameras. In European Conference on Computer Vision, pages 534-549, 2020. 8" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 489, + 294, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 489, + 294, + 555 + ], + "spans": [ + { + "bbox": [ + 56, + 489, + 294, + 555 + ], + "type": "text", + "content": "[39] Lei Sun, Christos Sakaridis, Jingyun Liang, Qi Jiang, Kailun Yang, Peng Sun, Yaozu Ye, Kaiwei Wang, and Luc Van Gool. Event-based fusion for motion deblurring with cross-modal attention. In European Conference on Computer Vision, pages 412-428. Springer, 2022. 1, 3, 4, 6, 7, 8, 9, 10, 11, 12" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 557, + 294, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 557, + 294, + 622 + ], + "spans": [ + { + "bbox": [ + 56, + 557, + 294, + 622 + ], + "type": "text", + "content": "[40] Lei Sun, Christos Sakaridis, Jingyun Liang, Peng Sun, Jiezhang Cao, Kai Zhang, Qi Jiang, Kaiwei Wang, and Luc Van Gool. Event-based frame interpolation with ad-hoc deblurring. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18043-18052, 2023. 2, 3, 4, 6, 7, 8, 9, 10" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 624, + 294, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 624, + 294, + 689 + ], + "spans": [ + { + "bbox": [ + 56, + 624, + 294, + 689 + ], + "type": "text", + "content": "[41] Lei Sun, Daniel Gehrig, Christos Sakaridis, Mathias Gehrig, Jingyun Liang, Peng Sun, Zhijie Xu, Kaiwei Wang, Luc Van Gool, and Davide Scaramuzza. A unified framework for event-based frame interpolation with ad-hoc deblurring in the wild. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2024. 1, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 691, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 691, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 691, + 294, + 713 + ], + "type": "text", + "content": "[42] Lei Sun, Andrea Alfarano, Peiqi Duan, Shaolin Su, Kaiwei Wang, Boxin Shi, Radu Timofte, Danda Pani Paudel, Luc" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 72, + 555, + 713 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 333, + 72, + 555, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 72, + 555, + 118 + ], + "spans": [ + { + "bbox": [ + 333, + 72, + 555, + 118 + ], + "type": "text", + "content": "Van Gool, et al. NTIRE 2025 challenge on event-based image deblurring: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 
2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 316, + 119, + 554, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 119, + 554, + 172 + ], + "spans": [ + { + "bbox": [ + 316, + 119, + 554, + 172 + ], + "type": "text", + "content": "[43] Lei Sun, Yuhan Bao, Jiajun Zhai, Jingyun Liang, Yu lun Zhang, Kaiwei Wang, Danda Pani Paudel, and Luc Van Gool. Low-light image enhancement using event-based illumination estimation. arXiv preprint arXiv:2504.09379, 2025.1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 316, + 175, + 554, + 229 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 175, + 554, + 229 + ], + "spans": [ + { + "bbox": [ + 316, + 175, + 554, + 229 + ], + "type": "text", + "content": "[44] Lei Sun, Hang Guo, Bin Ren, Luc Van Gool, Radu Timofte, Yawei Li, et al. The tenth nitre 2025 image denoising challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 231, + 554, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 231, + 554, + 275 + ], + "spans": [ + { + "bbox": [ + 316, + 231, + 554, + 275 + ], + "type": "text", + "content": "[45] Zhijing Sun, Xueyang Fu, Longzhuo Huang, Aiping Liu, and Zheng-Jun Zha. Motion aware event representation-driven image deblurring. In European Conference on Computer Vision, pages 418-435. Springer, 2024. 4" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 276, + 554, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 276, + 554, + 331 + ], + "spans": [ + { + "bbox": [ + 316, + 276, + 554, + 331 + ], + "type": "text", + "content": "[46] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Caitian Chen, Zongwei Wu, Radu Timofte, et al. NTIRE 2025 image shadow removal challenge report. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 332, + 555, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 332, + 555, + 388 + ], + "spans": [ + { + "bbox": [ + 316, + 332, + 555, + 388 + ], + "type": "text", + "content": "[47] Florin-Alexandru Vasluianu, Tim Seizinger, Zhuyun Zhou, Zongwei Wu, Radu Timofte, et al. NTIRE 2025 ambient lighting normalization challenge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 388, + 554, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 388, + 554, + 454 + ], + "spans": [ + { + "bbox": [ + 316, + 388, + 554, + 454 + ], + "type": "text", + "content": "[48] Yingqian Wang, Zhengyu Liang, Fengyuan Zhang, Lvli Tian, Longguang Wang, Juncheng Li, Jungang Yang, Radu Timofte, Yulan Guo, et al. NTIRE 2025 challenge on light field image super-resolution: Methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 456, + 554, + 499 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 456, + 554, + 499 + ], + "spans": [ + { + "bbox": [ + 316, + 456, + 554, + 499 + ], + "type": "text", + "content": "[49] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. 
Image quality assessment: from error visibility to structural similarity. IEEE Transactions on Image Processing, 13(4):600-612, 2004. 5" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 501, + 554, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 501, + 554, + 545 + ], + "spans": [ + { + "bbox": [ + 316, + 501, + 554, + 545 + ], + "type": "text", + "content": "[50] Wenming Weng, Yueyi Zhang, and Zhiwei Xiong. Event-based blurry frame interpolation under blind exposure. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1588-1598, 2023. 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 546, + 555, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 546, + 555, + 623 + ], + "spans": [ + { + "bbox": [ + 316, + 546, + 555, + 623 + ], + "type": "text", + "content": "[51] Kangning Yang, Jie Cai, Ling Ouyang, Florin-Alexandru Vasluianu, Radu Timofte, Jiaming Ding, Huiming Sun, Lan Fu, Jinlong Li, Chiu Man Ho, Zibo Meng, et al. NTIRE 2025 challenge on single image reflection removal in the wild: Datasets, methods and results. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 624, + 554, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 624, + 554, + 668 + ], + "spans": [ + { + "bbox": [ + 316, + 624, + 554, + 668 + ], + "type": "text", + "content": "[52] Wen Yang, Jinjian Wu, Jupo Ma, Leida Li, and Guangming Shi. Motion deblurring via spatial-temporal collaboration of frames and events. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 6531-6539, 2024. 6" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 670, + 554, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 670, + 554, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 670, + 554, + 713 + ], + "type": "text", + "content": "[53] Pierluigi Zama Ramirez, Fabio Tosi, Luigi Di Stefano, Radu Timofte, Alex Costanzino, Matteo Poggi, Samuele Salti, Stefano Mattoccia, et al. NTIRE 2025 challenge on hr depth from images of specular and transparent surfaces. In Pro" + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 295, + 296 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 75, + 72, + 294, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 72, + 294, + 95 + ], + "spans": [ + { + "bbox": [ + 75, + 72, + 294, + 95 + ], + "type": "text", + "content": "ceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2025. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 96, + 295, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 96, + 295, + 151 + ], + "spans": [ + { + "bbox": [ + 56, + 96, + 295, + 151 + ], + "type": "text", + "content": "[54] Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, Ming-Hsuan Yang, and Ling Shao. Learning enriched features for real image restoration and enhancement. In European Conference on Computer Vision, pages 492-511. Springer, 2020. 
5" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 152, + 294, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 152, + 294, + 217 + ], + "spans": [ + { + "bbox": [ + 56, + 152, + 294, + 217 + ], + "type": "text", + "content": "[55] Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, and Ming-Hsuan Yang. Restormer: Efficient transformer for high-resolution image restoration. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5728-5739, 2022. 4, 5, 8, 9" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 219, + 294, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 219, + 294, + 251 + ], + "spans": [ + { + "bbox": [ + 56, + 219, + 294, + 251 + ], + "type": "text", + "content": "[56] Shaobo Zhang, Lei Sun, and Kaiwei Wang. A multi-scale recurrent framework for motion segmentation with event camera. IEEE Access, 11:80105-80114, 2023. 1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 252, + 294, + 296 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 252, + 294, + 296 + ], + "spans": [ + { + "bbox": [ + 56, + 252, + 294, + 296 + ], + "type": "text", + "content": "[57] Yulun Zhang, Yapeng Tian, Yu Kong, Bineng Zhong, and Yun Fu. Residual dense network for image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2472-2481, 2018. 7" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12408/afaa1baf-15c0-4d8f-949b-b6edf18db129_content_list.json b/data/2025/2504_12xxx/2504.12408/afaa1baf-15c0-4d8f-949b-b6edf18db129_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..a7ff06ca44e4f8d2a6acb4944567143987dc2133 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12408/afaa1baf-15c0-4d8f-949b-b6edf18db129_content_list.json @@ -0,0 +1,967 @@ +[ + { + "type": "text", + "text": "A Human-AI Comparative Analysis of Prompt Sensitivity in LLM-Based Relevance Judgment", + "text_level": 1, + "bbox": [ + 117, + 99, + 879, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Negar Arabzadeh", + "bbox": [ + 225, + 162, + 370, + 178 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "narabzad@uwaterloo.ca", + "bbox": [ + 217, + 180, + 379, + 191 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Waterloo", + "bbox": [ + 220, + 195, + 375, + 208 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Waterloo, Ontario, Canada", + "bbox": [ + 207, + 210, + 387, + 223 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Charles L.A. 
Clarke", + "bbox": [ + 620, + 162, + 779, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "claclark@uwaterloo.ca", + "bbox": [ + 622, + 179, + 777, + 191 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Waterloo", + "bbox": [ + 622, + 194, + 777, + 208 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Waterloo, Ontario, Canada", + "bbox": [ + 609, + 210, + 790, + 223 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 83, + 232, + 156, + 246 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large Language Models (LLMs) are increasingly used to automate relevance judgments for information retrieval (IR) tasks, often demonstrating agreement with human labels that approaches interhuman agreement. To assess the robustness and reliability of LLM-based relevance judgments, we systematically investigate impact of prompt sensitivity on the task. We collected prompts for relevance assessment from 15 human experts and 15 LLMs across three tasks — binary, graded, and pairwise — yielding 90 prompts in total. After filtering out unusable prompts from three humans and three LLMs, we employed the remaining 72 prompts with three different LLMs as judges to label document/query pairs from two TREC Deep Learning Datasets (2020 and 2021). We compare LLM-generated labels with TREC official human labels using Cohen's $\\kappa$ and pairwise agreement measures. In addition to investigating the impact of prompt variations on agreement with human labels, we compare human- and LLM-generated prompts and analyze differences among different LLMs as judges. We also compare human- and LLM-generated prompts with the standard UMBRELA prompt used for relevance assessment by Bing and TREC 2024 Retrieval Augmented Generation (RAG) Track. To support future research in LLM-based evaluation, we release all data and prompts at https://github.com/Narabzad/prompt-sensitivity-relevance-judgements/.", + "bbox": [ + 81, + 250, + 482, + 556 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "CCS Concepts", + "text_level": 1, + "bbox": [ + 83, + 568, + 202, + 584 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "- Information systems $\\rightarrow$ Evaluation of retrieval results; Relevance assessment; Test collections.", + "bbox": [ + 81, + 587, + 482, + 614 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords", + "text_level": 1, + "bbox": [ + 83, + 628, + 169, + 643 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large Language Models, Relevance Judgments, Evaluation", + "bbox": [ + 83, + 646, + 436, + 660 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 84, + 678, + 218, + 691 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large Language Models (LLMs) are increasingly used for evaluation across various domains, including natural language processing and automated content assessment [1, 4, 9, 11, 28, 32]. The information retrieval (IR) community has been an early adopter of LLMs for relevance assessment [19, 24, 27, 35, 41]. 
Numerous studies have", + "bbox": [ + 81, + 696, + 482, + 766 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.", + "bbox": [ + 81, + 779, + 482, + 852 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "SIGIR '25, Padua, Italy", + "bbox": [ + 84, + 852, + 191, + 863 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "© 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM.", + "bbox": [ + 83, + 864, + 472, + 875 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ACM ISBN 979-8-4007-1592-1/2025/07", + "bbox": [ + 84, + 875, + 264, + 883 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://doi.org/10.1145/3726302.3730159", + "bbox": [ + 84, + 883, + 272, + 895 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "confirmed that LLM-generated relevance labels closely align with human labels under multiple measures of agreement [26, 36, 37].", + "bbox": [ + 513, + 233, + 911, + 261 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Nonetheless, despite the widespread adoption of LLMs for relevance assessment, prompting strategies vary substantially across studies [2, 3, 20, 33]. An experiment reported at the LLM4Eval Workshop in SIGIR 2024 on Large Language Models for Evaluation in Information Retrieval [29], analyzed how different prompts influence agreement with human judgments and system rankings [28]. While multiple studies have examined how LLMs respond to different prompting strategies [5, 10, 23, 25, 34], these studies have generally been conducted with prompts tuned to specific LLMs and collections, or where prompt variants are constrained by templates [6]. As a complement to these studies, we report on a study of prompts from a variety of independent sources that have not been tuned to LLMs or collections, allowing us to examine the robustness of LLM-based relevance assessment under different prompting strategies. This investigation also allows us to compare different LLMs as judges to determine the degree to which different LLMs are sensitive to prompt modifications.", + "bbox": [ + 511, + 261, + 913, + 496 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We collected and analyzed prompts generated by both human experts and LLMs themselves. We designed a guideline for prompting LLMs to perform relevance assessment following three different approaches: binary, graded, and pairwise. While most previous studies have focused on graded relevance, we believe it is crucial to explore a wider range of relevance assessment methods, as they have proven effective in assessing different scenarios in the evaluation of information-seeking systems [7, 8, 13-15, 21, 22, 31, 38-40]. 
As a benefit to employing LLMs for relevance assessment, it becomes easier to explore different approaches to relevance assessment since human judges do not need to be recruited and trained separately for each approach.", + "bbox": [ + 511, + 496, + 913, + 662 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We recruited 15 human participants to create prompts for each of the three assessment approaches. As part of the recruitment process, we ensured that the participants were familiar with prompt engineering and relevance assessment principles, as detailed in Section 2. As a result of these inclusion criteria for recruitment, most participants were drawn from three academic NLP/IR labs. We also collected prompts from 15 different open-source and commercial LLMs. Our primary goal is to understand prompt sensitivity in LLM-based relevance judgment [30], including its impact, robustness, and variation across different LLMs. Additionally, we explore the effectiveness of LLMs as prompt generators.", + "bbox": [ + 511, + 662, + 913, + 815 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We performed relevance judgment experiments using data from two years of the TREC Deep Learning Track: DL 2020 [16], and DL 2021 [17]. Using the prompts created by both human participants and LLMs, we conducted relevance assessments on query-document pairs from these datasets using two open-source LLMs - LLaMA", + "bbox": [ + 511, + 815, + 913, + 883 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.12408v1 [cs.IR] 16 Apr 2025", + "bbox": [ + 22, + 272, + 58, + 704 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3.2-3b and Mistral 7b - and one commercial LLM GPT-4o. Our experiment incorporates the three approaches to relevance assessment (binary, graded, and pairwise) with prompts from both humans and LLMs using three different LLMs as judges. Through our experiments, we address the following research questions:", + "bbox": [ + 81, + 106, + 480, + 176 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- RQ1. Impact of Prompts on LLM-based Relevance Judgment Approaches: Given a clear task objective, how do different prompts influence the effectiveness of each approach to LLM-based relevance judgment?", + "- RQ2. LLMs as Prompt Generators: How effective are LLM-generated prompts for relevance judgment, and how do they compare to human-crafted prompts?", + "- RQ3. Prompt Robustness Across LLMs: Are there prompts that consistently perform well across different LLMs, regardless of the model used as a judge?", + "- RQ4. Model-Specific Sensitivity to Prompts: Is prompt sensitivity consistent across all models, or do some LLMs show greater variability in performance?" + ], + "bbox": [ + 83, + 179, + 482, + 359 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To ensure reproducibility, we have made all data and experimental artifacts publicly available at https://github.com/Narabzad/prompt-sensitivity-relevance-judgements/. 
The study reported in this paper, and its associated data release, has received ethics clearance as human subjects research from our institution.", + "bbox": [ + 81, + 362, + 482, + 433 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Prompt Creation", + "text_level": 1, + "bbox": [ + 83, + 445, + 251, + 460 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 Prompt generation", + "text_level": 1, + "bbox": [ + 83, + 465, + 281, + 479 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To investigate the impact of prompting on LLM-based relevance judgment, we collected data from both human participants and LLMs, ensuring that the task objective remained clear and consistent (sharing the same intent) across all participants. We prepared guidelines for prompt writing1, which provide detailed explanations of the three relevance judgment tasks: 1) Binary relevance — a passage is either relevant (1) or not relevant (0) to a query. 2) Graded relevance — a passage is rated on a 0-3 scale, where 3 indicates perfect relevance to the query. 3) Pairwise relevance — given two passages, choose the passage more relevant to the query. In the guideline, each task is illustrated with examples from the TREC Deep Learning 2019 [18], helping to ensure that both humans and LLMs had a well-defined understanding of the task. These examples could also be used as (few-shot) examples if desired.", + "bbox": [ + 81, + 483, + 482, + 676 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The guidelines specify a Python-based format, where participants (both humans and LLMs) were required to fill in structured Python dictionaries. More specifically, participants had to provide both the "system message" and "user message" fields for the prompts, following the format commonly used in LLM-based prompting (e.g., OpenAI models and open-source alternatives such as those from Ollama). This structured approach ensures compatibility across different LLM implementations.", + "bbox": [ + 81, + 676, + 482, + 787 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We recruited 15 human participants, each of whom had at least a Master's degree in computer science, were fluent in English, and had prior experience working with LLMs via API usage or coding. Additionally, these participants had previously published at least one paper in an IR-focused conference. Each participant received a $10 gift card as a token of appreciation for their time and effort.", + "bbox": [ + 81, + 787, + 482, + 871 + ], + "page_idx": 1 + }, + { + "type": "table", + "img_path": "images/5b718910606f79a68ac8219463a79dfb50a2541cf96d49063477e0e1ff84af54.jpg", + "table_caption": [ + "Table 1: List of LLMs used for prompt generation." + ], + "table_footnote": [], + "table_body": "
GPT-4oGPT-4o MiniClaude 3.5LLaMA 3.2Phi-4
Mistral-largeDeepSeek-v3Amazon-Nova-Pro-v1Gemma-2-9bGrok-2
Gemini 2Jamba-1.5Athene-v2GPT o1GPT o1 Mini
", + "bbox": [ + 519, + 132, + 924, + 172 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "For prompt creation, we also used 15 different LLMs from the ChatBotArena² platform [12], which enables the execution of various LLMs online. We provided the same data collection guideline to the LMMs, including the task description and examples, ensuring that the LLMs received identical instructions to those given to human participants. Similar to human participants, each LLM was asked to complete the \"system message\" and \"user message\" fields in our Python function for relevance judgment. This setup allows us to systematically compare the impact of prompting across both groups. Table 1 provides the list of LLMs we used in this experiment for generating prompts for relevance judgments.", + "bbox": [ + 511, + 233, + 913, + 387 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2 Filtering and cleaning", + "text_level": 1, + "bbox": [ + 513, + 407, + 738, + 424 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To maintain consistency, we did not modify or provide additional instructions for any LLMs or human participants. Among the LLMs, two failed to complete the task because they deemed the task to be inappropriate, or repeatedly asked about examples. Among human participants, only one used a few-shot approach with examples. The rest did not provide any examples in their prompts. When testing the outputs of the collected prompts, not all of them were able to generate the expected format cleanly. Some prompts produced responses that required additional cleaning, such as verbose outputs like \"The passage is relevant, so the answer is: 1\" instead of simply returning 1. To ensure consistency, we examined the all generated output and applied necessary cleaning. After filtering and cleaning, we finalized 12 human-generated prompts and 12 LLM-generated prompts for use in our experiments.", + "bbox": [ + 511, + 426, + 913, + 621 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.3 Prompt Diversity", + "text_level": 1, + "bbox": [ + 513, + 642, + 700, + 657 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To better understand the variation in prompts, we examined the diversity of both human-generated and LLM-generated prompts. Specifically, we analyzed both user prompts and system prompts separately, as they serve distinct roles in guiding the LLM's response. In a prompt the user message provides the direct instructions given to the model, specifying what information is needed. In contrast, the system message provides context for the task, defining the LLM's role and expected behavior (e.g., \"You are an expert relevance judgment assessor\"). Figure 1 illustrates the distribution of unique terms used across all human-generated (in green) and LLM-generated (in red) prompts. As shown in this figure, human-generated prompts exhibit greater diversity in wording when compared to LLM-generated ones. This suggests that humans introduce more nuanced descriptions and varied phrasing when defining the task, while LLM-generated system prompts tend to rely on more standardized language. Additionally, system messages exhibit greater lexical diversity compared to user messages.", + "bbox": [ + 511, + 660, + 913, + 896 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "SIGIR '25, July 13-18, 2025, Padua, Italy", + "bbox": [ + 84, + 75, + 274, + 85 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Negar Arabzadeh and Charles L.A. 
Clarke", + "bbox": [ + 712, + 75, + 913, + 87 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "1https://bit.ly/4hP0EMg", + "bbox": [ + 84, + 883, + 199, + 895 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/1fc35f1fb4afa7dc61ae8b75f2e5a0925dc916b91e1fc9063dd02801e0432c0d.jpg", + "image_caption": [ + "Figure 1: Diversity of words across human and LLM-generated prompts." + ], + "image_footnote": [], + "bbox": [ + 122, + 109, + 434, + 224 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Experimental Methodology", + "text_level": 1, + "bbox": [ + 84, + 297, + 334, + 313 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Data We utilize the TREC Deep Learning Track datasets from 2020 and 2021. The DL-20 dataset contains 54 judged queries with 11,386 relevance assessments from MS MARCO V1 collection, while the DL-21 dataset includes 53 judged queries and 10,828 assessments from MS MARCO V2. Both datasets have been manually annotated by NIST assessors following the TREC relevance judgment guidelines. The assessors evaluate each document-query pair based on a graded relevance scale, ranging from not relevant (0) to highly relevant (3). The assessment process involves pooling top-ranked documents from multiple retrieval systems, which were then judged by human annotators. Using this data allows us to compare the three different variations of LLM-based judgments i.e., binary, graded, and pairwise. For graded relevance, we compare against the actual graded labels. For binary judgments, following prior work [19, 37], we classify levels 2 and 3 as relevant and levels 0 and 1 as non-relevant. For pairwise judgments, we compare documents with different relevance levels, assuming that a document with a higher relevance level should be ranked as more relevant than one with a lower relevance level.", + "bbox": [ + 86, + 316, + 480, + 577 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "LLMs for Relevance Judgments. To perform relevance assessment, we employed three different LLMs: one commercial model, GPT-40, and two open-source models, LLaMA 3.2-3B and Mistral-7B. We implemented our experiments using OpenAI and Ollama, running all prompts with a temperature setting of 0.", + "bbox": [ + 84, + 579, + 480, + 648 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Data Sampling. We conducted experiments on all query-document pairs for binary and graded relevance judgments using the open-source models. However, due to computational constraints, we were unable to run all 24 valid prompts across all query-document pairs for GPT-40. Instead, we randomly sampled up to 10 documents per query for each of the four relevance levels (0-3). If fewer than 10 documents were available for a given relevance level, we included all available documents. For pairwise judgments, evaluating all possible pairs was not feasible due to their quadratic growth. Instead, we categorized documents for each query into three groups: \"highly relevant\", \"relevant\", and \"non-relevant\". The \"highly relevant\" category corresponds to the highest available relevance level for that query, which in TREC-style annotations could be level 3 or level 2, depending on availability. 
The \"non-relevant\" category includes all level 0 documents, while any intermediate relevance level (typically level 1, or levels 1 and 2 if level 3 exists) was classified as \"relevant\".", + "bbox": [ + 86, + 648, + 482, + 869 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/14ed2005b76ba13d7e932f558d55c0b027ef2666ca965213ba06bad4416456ce.jpg", + "table_caption": [ + "Table 2: Mean and variance of agreement between LLM-based and human relevance judgments across different settings." + ], + "table_footnote": [], + "table_body": "
Modelcrafted byBinaryGradedPairwise
MeanVarianceMeanVarianceMeanVariance
GPT-4oLLM0.4340.0030.2150.0010.8490.000
Human0.2700.0980.2150.0010.5780.139
LLaMA 3.2LLM0.3030.0100.0330.0020.4390.066
Human0.1670.0410.1020.0030.3300.073
MistralLLM0.4050.0010.0080.0040.5740.014
Human0.2430.0510.0040.0050.4420.073
", + "bbox": [ + 519, + 145, + 934, + 262 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "From these three categories, we constructed document pairs for pairwise judgments. Specifically, we sampled 10 pairs per query from each of the following comparisons: \"highly relevant vs. non-relevant\", \"relevant vs. non-relevant\", and \"highly relevant vs. relevant\" (up to 30 pairs in total). If fewer than 10 pairs were available for a given comparison, we included as many as possible. Additionally, for the pairwise setting, we minimized positional bias by evaluating each document pair twice, swapping the order of the documents in the second run. The result is counted as \"agree\" if the LLM favors the more relevant passage in both comparisons, \"tie\" if the LLM's decisions are inconsistent when the passage order is swapped, and \"disagree\" if the LLM consistently selects the passage with a lower relevance level assigned by human annotators.", + "bbox": [ + 517, + 295, + 911, + 474 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "4 Results and Findings", + "text_level": 1, + "bbox": [ + 517, + 489, + 712, + 506 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In order to explore the research questions raised in the introduction, we investigated the agreement of LLM-based relevance judgments from different prompts with human annotations on TREC 2020 and 2021 using three different LLMs, as shown in Figure 2. For binary and graded relevance judgments, agreement is measured using Cohen's Kappa $(\\kappa)$ . For pairwise judgments, since the task involves assessing agreement with the actual ranking of pairs, we report the percentage of cases where the LLM's preference agrees with the expected order. In this figure, the leftmost two columns represent the results for binary, the middle two columns correspond to graded, and the rightmost two columns display the results from pairwise relevance judgment. The green, blue, and red bars indicate agreement for GPT-4o, LLAMA 3.2, and Mistral, respectively. In each pair of plots, the left plot presents results for DL-20, while the right plot corresponds to DL-21. The bottom 12 bars represent prompts crafted by LLMs; on top of them there are 12 bars corresponding to prompts created by humans.", + "bbox": [ + 517, + 508, + 911, + 742 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In addition to results from the human- and LLM-written prompts, we also report the results of UMBRELA assessments at the top of the graded relevance sub-figure (middle). UMBRELA is an open-source reproduction of Microsoft's Bing LLM-based relevance assessor [35], designed to automate relevance judgments effectively [36, 37]. It follows a structured prompting approach and has demonstrated high correlation with both human annotations and system rankings across multiple TREC Deep Learning Tracks (2019-2023). Notably, UMBRELA has been integrated into TREC 2024 RAG for automated evaluation, which further validated its reliability as an alternative to human assessors. 
We consider UMBRELA a reliable and effective", + "bbox": [ + 517, + 744, + 911, + 893 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "A Human-AI Comparative Analysis of Prompt Sensitivity in LLM-Based Relevance Judgment", + "bbox": [ + 84, + 75, + 521, + 87 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "SIGIR '25, July 13-18, 2025, Padua, Italy", + "bbox": [ + 723, + 75, + 911, + 87 + ], + "page_idx": 2 + }, + { + "type": "page_footnote", + "text": "2https://Imarena.ai/", + "bbox": [ + 84, + 883, + 178, + 895 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/6a0acc0eba950fda3479939265810549aec4da0f6fa29aaac59d68a2f27cbb35.jpg", + "image_caption": [ + "Figure 2: Agreement of LLM-based relevance judgments with human annotations across different prompts and relevance judgment tasks. UMBRELA represents the reproduction of Bing's LLM assessor introduced in [37]. Otherwise, the top 12 bars $(\\mathbf{H}^{*})$ represent human-crafted prompts, while the bottom 12 correspond to LLM-generated prompts. The dashed lines show the mean of agreement in LLM -crafted prompts and human-crafted prompts separately." + ], + "image_footnote": [], + "bbox": [ + 101, + 104, + 898, + 547 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "prompt and we believe comparing its performance against human-crafted and LLM-generated prompts in graded relevance judgments would bring interesting insights. Additionally, Table 2 summarizes Figure 2 by providing the mean and variance of agreement scores across the two datasets and different relevance judgments.", + "bbox": [ + 81, + 638, + 482, + 708 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We now consider investigating each of our research questions in light of these agreement results.", + "bbox": [ + 81, + 708, + 482, + 736 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "RQ1. Impact of Prompts on LLM-based Relevance Judgment Approaches: Figure 2 and Table 2 reveal significant variance across different LLM-based relevance judgment approaches. Binary and pairwise methods exhibit the least sensitivity to input prompts, maintaining more consistent agreement. In contrast, graded relevance judgments are highly sensitive to prompt variations. We note that while binary and pairwise methods operate with only two choices, graded relevance introduces greater variability. Particularly on graded judgments, GPT-40 demonstrates relatively stable performance but LLaMA 3.2 and Mistral show considerable fluctuations across different prompts.", + "bbox": [ + 81, + 736, + 482, + 888 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "RQ2. LLMs as Prompt Generators: Table 2 shows that LLM-generated prompts generally yield higher average agreement with human annotations. However, for graded relevance judgments, the difference is minimal. This may be due to (i) participants' greater familiarity with graded assessments or (ii) the inherently subjective nature of assigning relevance levels, which may require more calibration with human annotators. Additionally, LLM-generated prompts exhibit lower variance in agreement compared to human-crafted prompts, indicating less sensitivity to prompt variations.", + "RQ3. Prompt Robustness Across LLMs: Figure 3 analyzes inter-agreement rates among different prompt groups using Krippendorff's alpha. Here we measure agreement between different prompt's output, regardless of their alignment with human judgments. 
The results show that LLM-generated prompts exhibit higher inter-agreement than human-crafted ones, likely due to the greater linguistic diversity in human-generated prompts, as seen in Figure 1. This suggests that LLM-generated prompts are more robust" + ], + "bbox": [ + 511, + 638, + 913, + 875 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "SIGIR '25, July 13-18, 2025, Padua, Italy", + "bbox": [ + 84, + 75, + 274, + 85 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Negar Arabzadeh and Charles L.A. Clarke", + "bbox": [ + 712, + 75, + 913, + 85 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/32d199718463392947d076c7e80ed01b8a647bf3a5e940ced5752b21d5a01dc2.jpg", + "image_caption": [ + "Figure 3: Krippendorff's inter-agreement rate between all the prompts on two datasets." + ], + "image_footnote": [], + "bbox": [ + 122, + 107, + 434, + 251 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "than human-crafted ones. While some human-crafted prompts performed well across all models, prompt effectiveness varies significantly between LLMs, with no single prompt consistently excelling across all models. However, for graded assessments, UMBRELA consistently demonstrated high performance across different LLMs and it emerged as one of the most effective prompts across all models. UMBRELA had previously shown strong correlation with human judgments on TREC DL tracks [37]. We hypothesize that UMBRELA's strong and consistent performance may stem from how its prompt deconstructs the concept of relevance into finer-grained aspects, such as trustworthiness and alignment with intent. This structured approach likely prevents the LLM from relying on its own interpretation of relevance.", + "bbox": [ + 81, + 316, + 482, + 494 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "RQ4. Model-Specific Sensitivity to Prompts: From Figure 2, we observe that GPT-4o demonstrates high consistency across most prompts and all relevance assessment approaches. In contrast, the performance of LLaMA 3.2 and Mistral varies significantly depending on the prompt and assessment method. This variability is further confirmed by the variance of agreement reported in Table 2. Notably, GPT-4o exhibits consistently low variance in agreement, particularly when prompted with LLM-crafted prompts.", + "bbox": [ + 81, + 496, + 482, + 608 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5 Conclusion and Limitations", + "text_level": 1, + "bbox": [ + 83, + 619, + 341, + 633 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In this study, we investigated the sensitivity of LLM-based relevance judgments to different prompting strategies across multiple models. We examined how prompts, whether human- or LLM-generated, influence judgment effectiveness, their robustness across different LLMs, and the extent to which models exhibit variability in response to prompt modifications. One specific outcome is to confirm the performance of UMBRELA as a leading prompt for LLM-based graded relevance assessment. Despite these contributions, our study has limitations. Our human participants primarily had a computer science background with experience writing prompts for LLMs. 
Additionally, we evaluated only three LLMs as judges, limiting the generalizability of our findings.", + "bbox": [ + 81, + 637, + 482, + 804 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 84, + 815, + 176, + 828 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Marwah Alaofi, Negar Arabzadeh, Charles LA Clarke, and Mark Sanderson. 2024. Generative information retrieval evaluation. In Information Access in the Era of Generative AI. Springer, 135-159.", + "[2] Neger Arabzadeh, Amin Bigdeli, and Charles L. A. Clarke. 2024. Adapting Standard Retrieval Benchmarks to Evaluate Generated Answers. In 46th European Conference on Information Retrieval. Glasgow, Scotland." + ], + "bbox": [ + 89, + 830, + 482, + 893 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[3] Negar Arabzadeh and Charles LA Clarke. 2024. A Comparison of Methods for Evaluating Generative IR. arXiv preprint arXiv:2404.04044 (2024).", + "[4] Negar Arabzadeh, Siqing Huo, Nikhil Mehta, Qingyun Wu, Chi Wang, Ahmed Hassan Awadallah, Charles L. A. Clarke, and Julia Kiseleva. 2024. Assessing and Verifying Task Utility in LLM-Powered Applications. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (Eds.). Association for Computational Linguistics, Miami, Florida, USA, 21868-21888. doi:10.18653/v1/2024.emnlp-main.1219", + "[5] Simran Arora, Avanika Narayan, Mayee F. Chen, Laurel Orr, Neel Guha, Kush Bhatia, Ines Chami, Frederic Sala, and Christopher Re. 2022. Ask Me Anything: A simple strategy for prompting language models. arXiv:2210.02441 [cs.CL] https://arxiv.org/abs/2210.02441", + "[6] Leif Azzopardi, Charles LA Clarke, Paul Kantor, Bhaskar Mitra, Johanne R Trippas, Zhaochun Ren, Mohammad Aliennejadi, Negar Arabzadeh, Raman Chandrasekar, Maarten de Rijke, et al. 2024. Report on The Search Futures Workshop at ECIR 2024. In ACM SIGIR Forum, Vol. 58. ACM New York, NY, USA, 1-41.", + "[7] Chris Buckley and Ellen M Voorhees. 2004. Retrieval evaluation with incomplete information. In Proceedings of the 27th annual international ACM SIGIR conference on Research and development in information retrieval. 25-32.", + "[8] Ben Carterette, Paul N. Bennett, David Maxwell Chickering, and Susan T. Dumais. 2008. Here or there: Preference judgments for Relevance. Computer Science Department Faculty Publication Series 46. University of Massachusetts Amherst.", + "[9] Yupeng Chang, Xu Wang, Jindong Wang, Yuan Wu, Linyi Yang, Kaijie Zhu, Hao Chen, Xiaoyuan Yi, Cunxiang Wang, Yidong Wang, et al. 2024. A survey on evaluation of large language models. ACM Transactions on Intelligent Systems and Technology 15, 3 (2024), 1-45.", + "[10] Anowoy Chatterjee, HSVNS Kowndinya Renduchintala, Sumit Bhatia, and Tanmoy Chakraborty. 2024. POSIX: A Prompt Sensitivity Index For Large Language Models. arXiv preprint arXiv:2410.02185 (2024).", + "[11] Cheng-Han Chiang and Hung-yi Lee. 2023. Can large language models be an alternative to human evaluations? arXiv preprint arXiv:2305.01937 (2023).", + "[12] Wei-Lin Chiang, Lianmin Zheng, Ying Sheng, Anastasios Nikolas Angelopoulos, Tianle Li, Dacheng Li, Hao Zhang, Banghua Zhu, Michael Jordan, Joseph E Gonzalez, et al. 2024. Chatbot arena: An open platform for evaluating llms by human preference. arXiv preprint arXiv:2403.04132 (2024).", + "[13] Charles L. A. 
Clarke, Alexandra Vtyurina, and Mark D. Smucker. 2021. Assessing Top- $k$ Preferences. ACM Trans. Inf. Syst. 39, 3, Article 33 (may 2021), 21 pages. doi:10.1145/3451161", + "[14] Charles L. A. Clarke, Alexandra Vtyurina, and Mark D. Smucker. 2021. Assessing top- $k$ preferences. ACM Transactions on Information Systems 39, 3 (July 2021).", + "[15] Cyril W Cleverdon. 1991. The significance of the Cranfield tests on index languages. In Proceedings of the 14th annual international ACM SIGIR conference on Research and development in information retrieval. 3-12.", + "[16] Nick Craswell, Bhaskar Mitra, Emine Yilmaz, and Daniel Campos. 2021. Overview of the TREC 2020 deep learning track. arXiv:2102.07662 [cs.IR] https://arxiv.org/abs/2102.07662", + "[17] Nick Craswell, Bhaskar Mitra, Emine Yilmaz, Daniel Campos, and Jimmy Lin. 2022. Overview of the TREC 2021 deep learning track. In Text REtrieval Conference (TREC). NIST, TREC. https://www.microsoft.com/en-us/research/publication/overview-of-the-trec-2021-deep-learning-track/", + "[18] Nick Craswell, Bhaskar Mitra, Emine Yilmaz, Daniel Campos, and Ellen M Voorhees. 2020. Overview of the TREC 2019 deep learning track. arXiv preprint arXiv:2003.07820 (2020).", + "[19] Gugliemo Faggioli, Laura Dietz, Charles LA Clarke, Gianluca Demartini, Matthias Hagen, Claudia Hauff, Noriko Kando, Evangelos Kanoulas, Martin Potthast, Benno Stein, et al. 2023. Perspectives on large language models for relevance judgment. In Proceedings of the 2023 ACM SIGIR International Conference on Theory of Information Retrieval. 39-50.", + "[20] Naghmeh Farzi and Laura Dietz. 2024. Pencils down! automatic rubric-based evaluation of retrieve/generate systems. In Proceedings of the 2024 ACM SIGIR International Conference on Theory of Information Retrieval. 175-184.", + "[21] David Hawking, Ellen Voorhees, Nick Craswell, Peter Bailey, et al. 1999. Overview of the trec-8 web track. In TREC.", + "[22] Gabriella Kazai, Emine Yilmaz, Nick Craswell, and S.M.M. Tahaghoghi. 2013. User Intent and Assessor Disagreement in Web Search Evaluation. In 22nd ACM International Conference on Information and Knowledge Management. San Francisco, California, 699-708.", + "[23] Alina Leidinger, Robert van Rooij, and Ekaterina Shutova. 2023. The language of prompting: What linguistic properties make a prompt successful? arXiv:2311.01967 [cs.CL] https://arxiv.org/abs/2311.01967", + "[24] Dawei Li, Bohan Jiang, Liangjie Huang, Alimohammad Beigi, Chengshuai Zhao, Zhen Tan, Amrita Bhattacharjee, Yuxuan Jiang, Canyu Chen, Tianhao Wu, et al. 2024. From Generation to Judgment: Opportunities and Challenges of LLM-as-a-judge. arXiv preprint arXiv:2411.16594 (2024).", + "[25] Sheng Lu, Hendrik Schuff, and Iryna Gurevych. 2024. How are Prompts Different in Terms of Sensitivity? In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language" + ], + "bbox": [ + 517, + 108, + 913, + 895 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "A Human-AI Comparative Analysis of Prompt Sensitivity in LLM-Based Relevance Judgment", + "bbox": [ + 83, + 75, + 522, + 87 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "SIGIR '25, July 13-18, 2025, Padua, Italy", + "bbox": [ + 723, + 75, + 913, + 87 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Technologies (Volume 1: Long Papers), Kevin Duh, Helena Gomez, and Steven Bethard (Eds.). 
Association for Computational Linguistics, Mexico City, Mexico, 5833-5856. doi:10.18653/v1/2024.naacl-long.325", + "[26] Sean MacAvaney and Luca Soldaini. 2023. One-shot labeling for automatic relevance estimation. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2230-2235.", + "[27] Chuan Meng, Negar Arabzadeh, Arian Askari, Mohammad Aliannejadi, and Maarten de Rijke. 2024. Query Performance Prediction using Relevance Judgments Generated by Large Language Models. arXiv preprint arXiv:2404.01012 (2024).", + "[28] Hossein A Rahmani, Clemencia Siro, Mohammad Aliannejadi, Nick Craswell, Charles LA Clarke, Guglielmo Faggioli, Bhaskar Mitra, Paul Thomas, and Emine Yilmaz. 2024. Llm4eval: Large language model for evaluation in ir. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 3040-3043.", + "[29] Hossein A. Rahmani, Clemencia Siro, Mohammad Aliannejadi, Nick Craswell, Charles L. A. Clarke, Guglielmo Faggioli, Bhaskar Mitra, Paul Thomas, and Emine Yilmaz. 2024. Report on the 1st Workshop on Large Language Model for Evaluation in Information Retrieval (LLM4Eval 2024) at SIGIR 2024. arXiv:2408.05388 [cs.IR] https://arxiv.org/abs/2408.05388", + "[30] Amirhossein Razavi, Mina Soltangheis, Negar Arabzadeh, Sara Salamat, Morteza Zihayat, and Ebrahim Bagheri. 2025. Benchmarking Prompt Sensitivity in Large Language Models. arXiv preprint arXiv:2502.06065 (2025).", + "[31] Tetsuya Sakai and Zhaohao Zeng. 2020. Good evaluation measures based on document preferences. In 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. 359-368.", + "[32] Alireza Salemi and Hamed Zamani. 2024. Evaluating retrieval quality in retrieval-augmented generation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2395-2400.", + "[33] David P Sander and Laura Dietz. 2021. EXAM: How to Evaluate Retrieve-and-Generate Systems for Users Who Do Not (Yet) Know What They Want. In" + ], + "bbox": [ + 84, + 108, + 482, + 421 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "DESIREs. 136-146.", + "[34] Melanie Sclar, Yejin Choi, Yulia Tsvetkov, and Alane Suhr. 2023. Quantifying Language Models' Sensitivity to Spurious Features in Prompt Design or: How I learned to start worrying about prompt formatting. arXiv preprint arXiv:2310.11324 (2023).", + "[35] Paul Thomas, Seth Spielman, Nick Craswell, and Bhaskar Mitra. 2023. Large Language Models Can Accurately Predict Searcher Preferences. arXiv preprint arXiv:2309.10621 (2023).", + "[36] Shivani Upadhyay, Ronak Pradeep, Nandan Thakur, Daniel Campos, Nick Craswell, Ian Soboroff, Hoa Trang Dang, and Jimmy Lin. 2024. A Large-Scale Study of Relevance Assessments with Large Language Models: An Initial Look. arXiv:2411.08275 [cs.IR] https://arxiv.org/abs/2411.08275", + "[37] Shivani Upadhyay, Ronak Pradeep, Nandan Thakur, Nick Craswell, and Jimmy Lin. 2024. UMBRELA: Umbrela is the (Open-Source Reproduction of the) Bing RELevance Assessor. arXiv preprint arXiv:2406.06519 (2024).", + "[38] Ellen M Voorhees. 2000. Report on trec-9. In ACM SIGIR Forum, Vol. 34. ACM New York, NY, USA, 1-8.", + "[39] Xiaohui Xie, Jiaxin Mao, Yiqun Liu, Maarten de Rijke, Haitian Chen, Min Zhang, and Shaoping Ma. 2020. Preference-based evaluation metrics for web image search.
In 43rd Annual International ACM SIGIR Conference on Research and Development in Information Retrieval. Xi'an, China.", + "[40] Xinyi Yan, Chengxi Luo, Charles L. A. Clarke, Nick Craswell, Ellen M. Voorhees, and Pablo Castells. 2022. Human Preferences as Dueling Bandits. In Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '22). ACM. doi:10.1145/3477495.3531991", + "[41] Honglei Zhuang, Zhen Qin, Kai Hui, Junru Wu, Le Yan, Xuanhui Wang, and Michael Bendersky. 2023. Beyond Yes and No: Improving Zero-Shot LLM Rankers via Scoring Fine-Grained Relevance Labels. arXiv preprint arXiv:2310.14122 (2023)." + ], + "bbox": [ + 516, + 109, + 913, + 402 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "SIGIR '25, July 13-18, 2025, Padua, Italy", + "bbox": [ + 84, + 75, + 274, + 85 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Negar Arabzadeh and Charles L.A. Clarke", + "bbox": [ + 712, + 75, + 913, + 87 + ], + "page_idx": 5 + } +] \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12408/afaa1baf-15c0-4d8f-949b-b6edf18db129_model.json b/data/2025/2504_12xxx/2504.12408/afaa1baf-15c0-4d8f-949b-b6edf18db129_model.json new file mode 100644 index 0000000000000000000000000000000000000000..c24c47a0d7f4bd00e1ccadefcc2d2a52f3b9e0dc --- /dev/null +++ b/data/2025/2504_12xxx/2504.12408/afaa1baf-15c0-4d8f-949b-b6edf18db129_model.json @@ -0,0 +1,1477 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.273, + 0.06, + 0.705 + ], + "angle": 270, + "content": "arXiv:2504.12408v1 [cs.IR] 16 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.118, + 0.101, + 0.88, + 0.152 + ], + "angle": 0, + "content": "A Human-AI Comparative Analysis of Prompt Sensitivity in LLM-Based Relevance Judgment" + }, + { + "type": "text", + "bbox": [ + 0.227, + 0.163, + 0.371, + 0.179 + ], + "angle": 0, + "content": "Negar Arabzadeh" + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.181, + 0.38, + 0.193 + ], + "angle": 0, + "content": "narabzad@uwaterloo.ca" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.196, + 0.376, + 0.209 + ], + "angle": 0, + "content": "University of Waterloo" + }, + { + "type": "text", + "bbox": [ + 0.208, + 0.211, + 0.388, + 0.224 + ], + "angle": 0, + "content": "Waterloo, Ontario, Canada" + }, + { + "type": "text", + "bbox": [ + 0.621, + 0.163, + 0.78, + 0.178 + ], + "angle": 0, + "content": "Charles L.A. Clarke" + }, + { + "type": "text", + "bbox": [ + 0.624, + 0.18, + 0.778, + 0.193 + ], + "angle": 0, + "content": "claclark@uwaterloo.ca" + }, + { + "type": "text", + "bbox": [ + 0.624, + 0.195, + 0.778, + 0.209 + ], + "angle": 0, + "content": "University of Waterloo" + }, + { + "type": "text", + "bbox": [ + 0.61, + 0.211, + 0.791, + 0.224 + ], + "angle": 0, + "content": "Waterloo, Ontario, Canada" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.233, + 0.158, + 0.247 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.251, + 0.483, + 0.557 + ], + "angle": 0, + "content": "Large Language Models (LLMs) are increasingly used to automate relevance judgments for information retrieval (IR) tasks, often demonstrating agreement with human labels that approaches interhuman agreement. To assess the robustness and reliability of LLM-based relevance judgments, we systematically investigate the impact of prompt sensitivity on the task.
We collected prompts for relevance assessment from 15 human experts and 15 LLMs across three tasks — binary, graded, and pairwise — yielding 90 prompts in total. After filtering out unusable prompts from three humans and three LLMs, we employed the remaining 72 prompts with three different LLMs as judges to label document/query pairs from two TREC Deep Learning Datasets (2020 and 2021). We compare LLM-generated labels with TREC official human labels using Cohen's \\(\\kappa\\) and pairwise agreement measures. In addition to investigating the impact of prompt variations on agreement with human labels, we compare human- and LLM-generated prompts and analyze differences among different LLMs as judges. We also compare human- and LLM-generated prompts with the standard UMBRELA prompt used for relevance assessment by Bing and TREC 2024 Retrieval Augmented Generation (RAG) Track. To support future research in LLM-based evaluation, we release all data and prompts at https://github.com/Narabzad/prompt-sensitivity-relevance-judgements/." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.569, + 0.203, + 0.585 + ], + "angle": 0, + "content": "CCS Concepts" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.588, + 0.483, + 0.615 + ], + "angle": 0, + "content": "- Information systems \\(\\rightarrow\\) Evaluation of retrieval results; Relevance assessment; Test collections." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.629, + 0.17, + 0.644 + ], + "angle": 0, + "content": "Keywords" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.647, + 0.437, + 0.661 + ], + "angle": 0, + "content": "Large Language Models, Relevance Judgments, Evaluation" + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.679, + 0.219, + 0.692 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.697, + 0.483, + 0.767 + ], + "angle": 0, + "content": "Large Language Models (LLMs) are increasingly used for evaluation across various domains, including natural language processing and automated content assessment [1, 4, 9, 11, 28, 32]. The information retrieval (IR) community has been an early adopter of LLMs for relevance assessment [19, 24, 27, 35, 41]. Numerous studies have" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.78, + 0.483, + 0.853 + ], + "angle": 0, + "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org." + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.853, + 0.192, + 0.864 + ], + "angle": 0, + "content": "SIGIR '25, Padua, Italy" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.865, + 0.473, + 0.875 + ], + "angle": 0, + "content": "© 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM." 
+ }, + { + "type": "text", + "bbox": [ + 0.085, + 0.875, + 0.265, + 0.884 + ], + "angle": 0, + "content": "ACM ISBN 979-8-4007-1592-1/2025/07" + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.885, + 0.273, + 0.896 + ], + "angle": 0, + "content": "https://doi.org/10.1145/3726302.3730159" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.234, + 0.913, + 0.262 + ], + "angle": 0, + "content": "confirmed that LLM-generated relevance labels closely align with human labels under multiple measures of agreement [26, 36, 37]." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.262, + 0.915, + 0.497 + ], + "angle": 0, + "content": "Nonetheless, despite the widespread adoption of LLMs for relevance assessment, prompting strategies vary substantially across studies [2, 3, 20, 33]. An experiment reported at the LLM4Eval Workshop in SIGIR 2024 on Large Language Models for Evaluation in Information Retrieval [29] analyzed how different prompts influence agreement with human judgments and system rankings [28]. While multiple studies have examined how LLMs respond to different prompting strategies [5, 10, 23, 25, 34], these studies have generally been conducted with prompts tuned to specific LLMs and collections, or where prompt variants are constrained by templates [6]. As a complement to these studies, we report on a study of prompts from a variety of independent sources that have not been tuned to LLMs or collections, allowing us to examine the robustness of LLM-based relevance assessment under different prompting strategies. This investigation also allows us to compare different LLMs as judges to determine the degree to which different LLMs are sensitive to prompt modifications." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.497, + 0.915, + 0.663 + ], + "angle": 0, + "content": "We collected and analyzed prompts generated by both human experts and LLMs themselves. We designed a guideline for prompting LLMs to perform relevance assessment following three different approaches: binary, graded, and pairwise. While most previous studies have focused on graded relevance, we believe it is crucial to explore a wider range of relevance assessment methods, as they have proven effective in assessing different scenarios in the evaluation of information-seeking systems [7, 8, 13-15, 21, 22, 31, 38-40]. As a benefit of employing LLMs for relevance assessment, it becomes easier to explore different approaches to relevance assessment since human judges do not need to be recruited and trained separately for each approach." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.664, + 0.914, + 0.816 + ], + "angle": 0, + "content": "We recruited 15 human participants to create prompts for each of the three assessment approaches. As part of the recruitment process, we ensured that the participants were familiar with prompt engineering and relevance assessment principles, as detailed in Section 2. As a result of these inclusion criteria for recruitment, most participants were drawn from three academic NLP/IR labs. We also collected prompts from 15 different open source and commercial LLMs. Our primary goal is to understand prompt sensitivity in LLM-based relevance judgment [30], including its impact, robustness, and variation across different LLMs. Additionally, we explore the effectiveness of LLMs as prompt generators."
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.816, + 0.914, + 0.885 + ], + "angle": 0, + "content": "We performed relevance judgment experiments using data from two years of the TREC Deep Learning Track: DL 2020 [16], and DL 2021 [17]. Using the prompts created by both human participants and LLMs, we conducted relevance assessments on query-document pairs from these datasets using two open-source LLMs - LLaMA" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.275, + 0.087 + ], + "angle": 0, + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy" + }, + { + "type": "header", + "bbox": [ + 0.714, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "Negar Arabzadeh and Charles L.A. Clarke" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.107, + 0.482, + 0.177 + ], + "angle": 0, + "content": "3.2-3b and Mistral 7b - and one commercial LLM GPT-4o. Our experiment incorporates the three approaches to relevance assessment (binary, graded, and pairwise) with prompts from both humans and LLMs using three different LLMs as judges. Through our experiments, we address the following research questions:" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.18, + 0.483, + 0.235 + ], + "angle": 0, + "content": "- RQ1. Impact of Prompts on LLM-based Relevance Judgment Approaches: Given a clear task objective, how do different prompts influence the effectiveness of each approach to LLM-based relevance judgment?" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.236, + 0.483, + 0.277 + ], + "angle": 0, + "content": "- RQ2. LLMs as Prompt Generators: How effective are LLM-generated prompts for relevance judgment, and how do they compare to human-crafted prompts?" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.278, + 0.482, + 0.319 + ], + "angle": 0, + "content": "- RQ3. Prompt Robustness Across LLMs: Are there prompts that consistently perform well across different LLMs, regardless of the model used as a judge?" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.319, + 0.483, + 0.36 + ], + "angle": 0, + "content": "- RQ4. Model-Specific Sensitivity to Prompts: Is prompt sensitivity consistent across all models, or do some LLMs show greater variability in performance?" + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.18, + 0.483, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.363, + 0.483, + 0.434 + ], + "angle": 0, + "content": "To ensure reproducibility, we have made all data and experimental artifacts publicly available at https://github.com/Narabzad/prompt-sensitivity-relevance-judgements/. The study reported in this paper, and its associated data release, has received ethics clearance as human subjects research from our institution." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.446, + 0.253, + 0.461 + ], + "angle": 0, + "content": "2 Prompt Creation" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.466, + 0.282, + 0.481 + ], + "angle": 0, + "content": "2.1 Prompt generation" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.484, + 0.483, + 0.677 + ], + "angle": 0, + "content": "To investigate the impact of prompting on LLM-based relevance judgment, we collected data from both human participants and LLMs, ensuring that the task objective remained clear and consistent (sharing the same intent) across all participants. 
We prepared guidelines for prompt writing1, which provide detailed explanations of the three relevance judgment tasks: 1) Binary relevance — a passage is either relevant (1) or not relevant (0) to a query. 2) Graded relevance — a passage is rated on a 0-3 scale, where 3 indicates perfect relevance to the query. 3) Pairwise relevance — given two passages, choose the passage more relevant to the query. In the guideline, each task is illustrated with examples from the TREC 2019 Deep Learning Track [18], helping to ensure that both humans and LLMs had a well-defined understanding of the task. These examples could also be used as (few-shot) examples if desired." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.678, + 0.483, + 0.789 + ], + "angle": 0, + "content": "The guidelines specify a Python-based format, where participants (both humans and LLMs) were required to fill in structured Python dictionaries. More specifically, participants had to provide both the \"system message\" and \"user message\" fields for the prompts, following the format commonly used in LLM-based prompting (e.g., OpenAI models and open-source alternatives such as those from Ollama). This structured approach ensures compatibility across different LLM implementations." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.789, + 0.483, + 0.872 + ], + "angle": 0, + "content": "We recruited 15 human participants, all of whom had at least a Master's degree in computer science, were fluent in English, and had prior experience working with LLMs via API usage or coding. Additionally, these participants had previously published at least one paper in an IR-focused conference. Each participant received a $10 gift card as a token of appreciation for their time and effort." + }, + { + "type": "table_caption", + "bbox": [ + 0.548, + 0.105, + 0.88, + 0.12 + ], + "angle": 0, + "content": "Table 1: List of LLMs used for prompt generation." + }, + { + "type": "table", + "bbox": [ + 0.52, + 0.133, + 0.925, + 0.174 + ], + "angle": 0, + "content": "
GPT-4oGPT-4o MiniClaude 3.5LLaMA 3.2Phi-4
Mistral-largeDeepSeek-v3Amazon-Nova-Pro-v1Gemma-2-9bGrok-2
Gemini 2Jamba-1.5Athene-v2GPT o1GPT o1 Mini
" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.234, + 0.915, + 0.388 + ], + "angle": 0, + "content": "For prompt creation, we also used 15 different LLMs from the ChatBotArena² platform [12], which enables the execution of various LLMs online. We provided the same data collection guideline to the LMMs, including the task description and examples, ensuring that the LLMs received identical instructions to those given to human participants. Similar to human participants, each LLM was asked to complete the \"system message\" and \"user message\" fields in our Python function for relevance judgment. This setup allows us to systematically compare the impact of prompting across both groups. Table 1 provides the list of LLMs we used in this experiment for generating prompts for relevance judgments." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.408, + 0.74, + 0.425 + ], + "angle": 0, + "content": "2.2 Filtering and cleaning" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.427, + 0.915, + 0.622 + ], + "angle": 0, + "content": "To maintain consistency, we did not modify or provide additional instructions for any LLMs or human participants. Among the LLMs, two failed to complete the task because they deemed the task to be inappropriate, or repeatedly asked about examples. Among human participants, only one used a few-shot approach with examples. The rest did not provide any examples in their prompts. When testing the outputs of the collected prompts, not all of them were able to generate the expected format cleanly. Some prompts produced responses that required additional cleaning, such as verbose outputs like \"The passage is relevant, so the answer is: 1\" instead of simply returning 1. To ensure consistency, we examined the all generated output and applied necessary cleaning. After filtering and cleaning, we finalized 12 human-generated prompts and 12 LLM-generated prompts for use in our experiments." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.643, + 0.702, + 0.659 + ], + "angle": 0, + "content": "2.3 Prompt Diversity" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.661, + 0.915, + 0.897 + ], + "angle": 0, + "content": "To better understand the variation in prompts, we examined the diversity of both human-generated and LLM-generated prompts. Specifically, we analyzed both user prompts and system prompts separately, as they serve distinct roles in guiding the LLM's response. In a prompt the user message provides the direct instructions given to the model, specifying what information is needed. In contrast, the system message provides context for the task, defining the LLM's role and expected behavior (e.g., \"You are an expert relevance judgment assessor\"). Figure 1 illustrates the distribution of unique terms used across all human-generated (in green) and LLM-generated (in red) prompts. As shown in this figure, human-generated prompts exhibit greater diversity in wording when compared to LLM-generated ones. This suggests that humans introduce more nuanced descriptions and varied phrasing when defining the task, while LLM-generated system prompts tend to rely on more standardized language. Additionally, system messages exhibit greater lexical diversity compared to user messages." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.085, + 0.884, + 0.2, + 0.896 + ], + "angle": 0, + "content": "1https://bit.ly/4hP0EMg" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.077, + 0.522, + 0.088 + ], + "angle": 0, + "content": "A Human-AI Comparative Analysis of Prompt Sensitivity in LLM-Based Relevance Judgment" + }, + { + "type": "header", + "bbox": [ + 0.724, + 0.077, + 0.912, + 0.088 + ], + "angle": 0, + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy" + }, + { + "type": "image", + "bbox": [ + 0.123, + 0.11, + 0.436, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.086, + 0.244, + 0.484, + 0.273 + ], + "angle": 0, + "content": "Figure 1: Diversity of words across human and LLM-generated prompts." + }, + { + "type": "title", + "bbox": [ + 0.086, + 0.298, + 0.336, + 0.314 + ], + "angle": 0, + "content": "3 Experimental Methodology" + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.317, + 0.482, + 0.578 + ], + "angle": 0, + "content": "Data. We utilize the TREC Deep Learning Track datasets from 2020 and 2021. The DL-20 dataset contains 54 judged queries with 11,386 relevance assessments from the MS MARCO V1 collection, while the DL-21 dataset includes 53 judged queries and 10,828 assessments from MS MARCO V2. Both datasets have been manually annotated by NIST assessors following the TREC relevance judgment guidelines. The assessors evaluate each document-query pair based on a graded relevance scale, ranging from not relevant (0) to highly relevant (3). The assessment process involves pooling top-ranked documents from multiple retrieval systems, which were then judged by human annotators. Using this data allows us to compare the three different variations of LLM-based judgments, i.e., binary, graded, and pairwise. For graded relevance, we compare against the actual graded labels. For binary judgments, following prior work [19, 37], we classify levels 2 and 3 as relevant and levels 0 and 1 as non-relevant. For pairwise judgments, we compare documents with different relevance levels, assuming that a document with a higher relevance level should be ranked as more relevant than one with a lower relevance level." + }, + { + "type": "text", + "bbox": [ + 0.086, + 0.58, + 0.482, + 0.649 + ], + "angle": 0, + "content": "LLMs for Relevance Judgments. To perform relevance assessment, we employed three different LLMs: one commercial model, GPT-4o, and two open-source models, LLaMA 3.2-3B and Mistral-7B. We implemented our experiments using OpenAI and Ollama, running all prompts with a temperature setting of 0." + }, + { + "type": "text", + "bbox": [ + 0.087, + 0.65, + 0.483, + 0.87 + ], + "angle": 0, + "content": "Data Sampling. We conducted experiments on all query-document pairs for binary and graded relevance judgments using the open-source models. However, due to computational constraints, we were unable to run all 24 valid prompts across all query-document pairs for GPT-4o. Instead, we randomly sampled up to 10 documents per query for each of the four relevance levels (0-3). If fewer than 10 documents were available for a given relevance level, we included all available documents. For pairwise judgments, evaluating all possible pairs was not feasible due to their quadratic growth. Instead, we categorized documents for each query into three groups: \"highly relevant\", \"relevant\", and \"non-relevant\".
The \"highly relevant\" category corresponds to the highest available relevance level for that query, which in TREC-style annotations could be level 3 or level 2, depending on availability. The \"non-relevant\" category includes all level 0 documents, while any intermediate relevance level (typically level 1, or levels 1 and 2 if level 3 exists) was classified as \"relevant\"." + }, + { + "type": "table_caption", + "bbox": [ + 0.518, + 0.105, + 0.912, + 0.133 + ], + "angle": 0, + "content": "Table 2: Mean and variance of agreement between LLM-based and human relevance judgments across different settings." + }, + { + "type": "table", + "bbox": [ + 0.52, + 0.146, + 0.935, + 0.263 + ], + "angle": 0, + "content": "
Modelcrafted byBinaryGradedPairwise
MeanVarianceMeanVarianceMeanVariance
GPT-4oLLM0.4340.0030.2150.0010.8490.000
Human0.2700.0980.2150.0010.5780.139
LLaMA 3.2LLM0.3030.0100.0330.0020.4390.066
Human0.1670.0410.1020.0030.3300.073
MistralLLM0.4050.0010.0080.0040.5740.014
Human0.2430.0510.0040.0050.4420.073
" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.296, + 0.913, + 0.475 + ], + "angle": 0, + "content": "From these three categories, we constructed document pairs for pairwise judgments. Specifically, we sampled 10 pairs per query from each of the following comparisons: \"highly relevant vs. non-relevant\", \"relevant vs. non-relevant\", and \"highly relevant vs. relevant\" (up to 30 pairs in total). If fewer than 10 pairs were available for a given comparison, we included as many as possible. Additionally, for the pairwise setting, we minimized positional bias by evaluating each document pair twice, swapping the order of the documents in the second run. The result is counted as \"agree\" if the LLM favors the more relevant passage in both comparisons, \"tie\" if the LLM's decisions are inconsistent when the passage order is swapped, and \"disagree\" if the LLM consistently selects the passage with a lower relevance level assigned by human annotators." + }, + { + "type": "title", + "bbox": [ + 0.518, + 0.491, + 0.714, + 0.507 + ], + "angle": 0, + "content": "4 Results and Findings" + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.509, + 0.913, + 0.743 + ], + "angle": 0, + "content": "In order to explore the research questions raised in the introduction, we investigated the agreement of LLM-based relevance judgments from different prompts with human annotations on TREC 2020 and 2021 using three different LLMs, as shown in Figure 2. For binary and graded relevance judgments, agreement is measured using Cohen's Kappa \\((\\kappa)\\). For pairwise judgments, since the task involves assessing agreement with the actual ranking of pairs, we report the percentage of cases where the LLM's preference agrees with the expected order. In this figure, the leftmost two columns represent the results for binary, the middle two columns correspond to graded, and the rightmost two columns display the results from pairwise relevance judgment. The green, blue, and red bars indicate agreement for GPT-4o, LLAMA 3.2, and Mistral, respectively. In each pair of plots, the left plot presents results for DL-20, while the right plot corresponds to DL-21. The bottom 12 bars represent prompts crafted by LLMs; on top of them there are 12 bars corresponding to prompts created by humans." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.745, + 0.913, + 0.895 + ], + "angle": 0, + "content": "In addition to results from the human- and LLM-written prompts, we also report the results of UMBRELA assessments at the top of the graded relevance sub-figure (middle). UMBRELA is an open-source reproduction of Microsoft's Bing LLM-based relevance assessor [35], designed to automate relevance judgments effectively [36, 37]. It follows a structured prompting approach and has demonstrated high correlation with both human annotations and system rankings across multiple TREC Deep Learning Tracks (2019-2023). Notably, UMBRELA has been integrated into TREC 2024 RAG for automated evaluation, which further validated its reliability as an alternative to human assessors. 
We consider UMBRELA a reliable and effective" + }, + { + "type": "page_footnote", + "bbox": [ + 0.086, + 0.884, + 0.179, + 0.896 + ], + "angle": 0, + "content": "2https://Imarena.ai/" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.275, + 0.087 + ], + "angle": 0, + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy" + }, + { + "type": "header", + "bbox": [ + 0.714, + 0.076, + 0.914, + 0.087 + ], + "angle": 0, + "content": "Negar Arabzadeh and Charles L.A. Clarke" + }, + { + "type": "image", + "bbox": [ + 0.102, + 0.106, + 0.9, + 0.549 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.081, + 0.563, + 0.913, + 0.62 + ], + "angle": 0, + "content": "Figure 2: Agreement of LLM-based relevance judgments with human annotations across different prompts and relevance judgment tasks. UMBRELA represents the reproduction of Bing's LLM assessor introduced in [37]. Otherwise, the top 12 bars \\((\\mathbf{H}^{*})\\) represent human-crafted prompts, while the bottom 12 correspond to LLM-generated prompts. The dashed lines show the mean of agreement in LLM -crafted prompts and human-crafted prompts separately." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.64, + 0.483, + 0.709 + ], + "angle": 0, + "content": "prompt and we believe comparing its performance against human-crafted and LLM-generated prompts in graded relevance judgments would bring interesting insights. Additionally, Table 2 summarizes Figure 2 by providing the mean and variance of agreement scores across the two datasets and different relevance judgments." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.709, + 0.483, + 0.737 + ], + "angle": 0, + "content": "We now consider investigating each of our research questions in light of these agreement results." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.737, + 0.483, + 0.89 + ], + "angle": 0, + "content": "RQ1. Impact of Prompts on LLM-based Relevance Judgment Approaches: Figure 2 and Table 2 reveal significant variance across different LLM-based relevance judgment approaches. Binary and pairwise methods exhibit the least sensitivity to input prompts, maintaining more consistent agreement. In contrast, graded relevance judgments are highly sensitive to prompt variations. We note that while binary and pairwise methods operate with only two choices, graded relevance introduces greater variability. Particularly on graded judgments, GPT-40 demonstrates relatively stable performance but LLaMA 3.2 and Mistral show considerable fluctuations across different prompts." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.64, + 0.915, + 0.764 + ], + "angle": 0, + "content": "RQ2. LLMs as Prompt Generators: Table 2 shows that LLM-generated prompts generally yield higher average agreement with human annotations. However, for graded relevance judgments, the difference is minimal. This may be due to (i) participants' greater familiarity with graded assessments or (ii) the inherently subjective nature of assigning relevance levels, which may require more calibration with human annotators. Additionally, LLM-generated prompts exhibit lower variance in agreement compared to human-crafted prompts, indicating less sensitivity to prompt variations." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.764, + 0.915, + 0.875 + ], + "angle": 0, + "content": "RQ3. Prompt Robustness Across LLMs: Figure 3 analyzes inter-agreement rates among different prompt groups using Krippendorff's alpha. 
Here we measure agreement between different prompts' outputs, regardless of their alignment with human judgments. The results show that LLM-generated prompts exhibit higher inter-agreement than human-crafted ones, likely due to the greater linguistic diversity in human-generated prompts, as seen in Figure 1. This suggests that LLM-generated prompts are more robust" + }, + { + "type": "list", + "bbox": [ + 0.513, + 0.64, + 0.915, + 0.875 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.524, + 0.088 + ], + "angle": 0, + "content": "A Human-AI Comparative Analysis of Prompt Sensitivity in LLM-Based Relevance Judgment" + }, + { + "type": "header", + "bbox": [ + 0.724, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy" + }, + { + "type": "image", + "bbox": [ + 0.124, + 0.108, + 0.436, + 0.252 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.267, + 0.483, + 0.297 + ], + "angle": 0, + "content": "Figure 3: Krippendorff's inter-agreement rate between all the prompts on two datasets." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.317, + 0.483, + 0.496 + ], + "angle": 0, + "content": "than human-crafted ones. While some human-crafted prompts performed well across all models, prompt effectiveness varies significantly between LLMs, with no single prompt consistently excelling across all models. However, for graded assessments, UMBRELA consistently demonstrated high performance across different LLMs and it emerged as one of the most effective prompts across all models. UMBRELA had previously shown strong correlation with human judgments on TREC DL tracks [37]. We hypothesize that UMBRELA's strong and consistent performance may stem from how its prompt deconstructs the concept of relevance into finer-grained aspects, such as trustworthiness and alignment with intent. This structured approach likely prevents the LLM from relying on its own interpretation of relevance." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.497, + 0.483, + 0.609 + ], + "angle": 0, + "content": "RQ4. Model-Specific Sensitivity to Prompts: From Figure 2, we observe that GPT-4o demonstrates high consistency across most prompts and all relevance assessment approaches. In contrast, the performance of LLaMA 3.2 and Mistral varies significantly depending on the prompt and assessment method. This variability is further confirmed by the variance of agreement reported in Table 2. Notably, GPT-4o exhibits consistently low variance in agreement, particularly when prompted with LLM-crafted prompts." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.62, + 0.342, + 0.635 + ], + "angle": 0, + "content": "5 Conclusion and Limitations" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.638, + 0.483, + 0.805 + ], + "angle": 0, + "content": "In this study, we investigated the sensitivity of LLM-based relevance judgments to different prompting strategies across multiple models. We examined how prompts, whether human- or LLM-generated, influence judgment effectiveness, their robustness across different LLMs, and the extent to which models exhibit variability in response to prompt modifications. One specific outcome is to confirm the performance of UMBRELA as a leading prompt for LLM-based graded relevance assessment. Despite these contributions, our study has limitations. Our human participants primarily had a computer science background with experience writing prompts for LLMs.
Additionally, we evaluated only three LLMs as judges, limiting the generalizability of our findings." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.816, + 0.178, + 0.829 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.832, + 0.483, + 0.864 + ], + "angle": 0, + "content": "[1] Marwah Alaofi, Negar Arabzadeh, Charles LA Clarke, and Mark Sanderson. 2024. Generative information retrieval evaluation. In Information Access in the Era of Generative AI. Springer, 135-159." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.864, + 0.482, + 0.895 + ], + "angle": 0, + "content": "[2] Neger Arabzadeh, Amin Bigdeli, and Charles L. A. Clarke. 2024. Adapting Standard Retrieval Benchmarks to Evaluate Generated Answers. In 46th European Conference on Information Retrieval. Glasgow, Scotland." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.832, + 0.483, + 0.895 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.109, + 0.915, + 0.13 + ], + "angle": 0, + "content": "[3] Negar Arabzadeh and Charles LA Clarke. 2024. A Comparison of Methods for Evaluating Generative IR. arXiv preprint arXiv:2404.04044 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.131, + 0.915, + 0.201 + ], + "angle": 0, + "content": "[4] Negar Arabzadeh, Siqing Huo, Nikhil Mehta, Qingyun Wu, Chi Wang, Ahmed Hassan Awadallah, Charles L. A. Clarke, and Julia Kiseleva. 2024. Assessing and Verifying Task Utility in LLM-Powered Applications. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (Eds.). Association for Computational Linguistics, Miami, Florida, USA, 21868-21888. doi:10.18653/v1/2024.emnlp-main.1219" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.201, + 0.914, + 0.242 + ], + "angle": 0, + "content": "[5] Simran Arora, Avanika Narayan, Mayee F. Chen, Laurel Orr, Neel Guha, Kush Bhatia, Ines Chami, Frederic Sala, and Christopher Re. 2022. Ask Me Anything: A simple strategy for prompting language models. arXiv:2210.02441 [cs.CL] https://arxiv.org/abs/2210.02441" + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.242, + 0.914, + 0.281 + ], + "angle": 0, + "content": "[6] Leif Azzopardi, Charles LA Clarke, Paul Kantor, Bhaskar Mitra, Johanne R Trippas, Zhaochun Ren, Mohammad Aliennejadi, Negar Arabzadeh, Raman Chandrasekar, Maarten de Rijke, et al. 2024. Report on The Search Futures Workshop at ECIR 2024. In ACM SIGIR Forum, Vol. 58. ACM New York, NY, USA, 1-41." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.281, + 0.914, + 0.312 + ], + "angle": 0, + "content": "[7] Chris Buckley and Ellen M Voorhees. 2004. Retrieval evaluation with incomplete information. In Proceedings of the 27th annual international ACM SIGIR conference on Research and development in information retrieval. 25-32." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.312, + 0.914, + 0.342 + ], + "angle": 0, + "content": "[8] Ben Carterette, Paul N. Bennett, David Maxwell Chickering, and Susan T. Dumais. 2008. Here or there: Preference judgments for Relevance. Computer Science Department Faculty Publication Series 46. University of Massachusetts Amherst." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.342, + 0.914, + 0.382 + ], + "angle": 0, + "content": "[9] Yupeng Chang, Xu Wang, Jindong Wang, Yuan Wu, Linyi Yang, Kaijie Zhu, Hao Chen, Xiaoyuan Yi, Cunxiang Wang, Yidong Wang, et al. 2024. 
A survey on evaluation of large language models. ACM Transactions on Intelligent Systems and Technology 15, 3 (2024), 1-45." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.382, + 0.914, + 0.412 + ], + "angle": 0, + "content": "[10] Anowoy Chatterjee, HSVNS Kowndinya Renduchintala, Sumit Bhatia, and Tanmoy Chakraborty. 2024. POSIX: A Prompt Sensitivity Index For Large Language Models. arXiv preprint arXiv:2410.02185 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.412, + 0.914, + 0.432 + ], + "angle": 0, + "content": "[11] Cheng-Han Chiang and Hung-yi Lee. 2023. Can large language models be an alternative to human evaluations? arXiv preprint arXiv:2305.01937 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.432, + 0.914, + 0.472 + ], + "angle": 0, + "content": "[12] Wei-Lin Chiang, Lianmin Zheng, Ying Sheng, Anastasios Nikolas Angelopoulos, Tianle Li, Dacheng Li, Hao Zhang, Banghua Zhu, Michael Jordan, Joseph E Gonzalez, et al. 2024. Chatbot arena: An open platform for evaluating llms by human preference. arXiv preprint arXiv:2403.04132 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.472, + 0.914, + 0.502 + ], + "angle": 0, + "content": "[13] Charles L. A. Clarke, Alexandra Vtyurina, and Mark D. Smucker. 2021. Assessing Top-\\(k\\) Preferences. ACM Trans. Inf. Syst. 39, 3, Article 33 (may 2021), 21 pages. doi:10.1145/3451161" + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.503, + 0.914, + 0.523 + ], + "angle": 0, + "content": "[14] Charles L. A. Clarke, Alexandra Vtyurina, and Mark D. Smucker. 2021. Assessing top-\\(k\\) preferences. ACM Transactions on Information Systems 39, 3 (July 2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.523, + 0.914, + 0.553 + ], + "angle": 0, + "content": "[15] Cyril W Cleverdon. 1991. The significance of the Cranfield tests on index languages. In Proceedings of the 14th annual international ACM SIGIR conference on Research and development in information retrieval. 3-12." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.553, + 0.914, + 0.582 + ], + "angle": 0, + "content": "[16] Nick Craswell, Bhaskar Mitra, Emine Yilmaz, and Daniel Campos. 2021. Overview of the TREC 2020 deep learning track. arXiv:2102.07662 [cs.IR] https://arxiv.org/abs/2102.07662" + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.583, + 0.914, + 0.623 + ], + "angle": 0, + "content": "[17] Nick Craswell, Bhaskar Mitra, Emine Yilmaz, Daniel Campos, and Jimmy Lin. 2022. Overview of the TREC 2021 deep learning track. In Text REtrieval Conference (TREC). NIST, TREC. https://www.microsoft.com/en-us/research/publication/overview-of-the-trec-2021-deep-learning-track/" + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.623, + 0.914, + 0.654 + ], + "angle": 0, + "content": "[18] Nick Craswell, Bhaskar Mitra, Emine Yilmaz, Daniel Campos, and Ellen M Voorhees. 2020. Overview of the TREC 2019 deep learning track. arXiv preprint arXiv:2003.07820 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.654, + 0.914, + 0.703 + ], + "angle": 0, + "content": "[19] Gugliemo Faggioli, Laura Dietz, Charles LA Clarke, Gianluca Demartini, Matthias Hagen, Claudia Hauff, Noriko Kando, Evangelos Kanoulas, Martin Potthast, Benno Stein, et al. 2023. Perspectives on large language models for relevance judgment. In Proceedings of the 2023 ACM SIGIR International Conference on Theory of Information Retrieval. 39-50." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.703, + 0.914, + 0.734 + ], + "angle": 0, + "content": "[20] Naghmeh Farzi and Laura Dietz. 2024. Pencils down! automatic rubric-based evaluation of retrieve/generate systems. In Proceedings of the 2024 ACM SIGIR International Conference on Theory of Information Retrieval. 175-184." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.734, + 0.914, + 0.754 + ], + "angle": 0, + "content": "[21] David Hawking, Ellen Voorhees, Nick Craswell, Peter Bailey, et al. 1999. Overview of the trec-8 web track. In TREC." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.754, + 0.914, + 0.794 + ], + "angle": 0, + "content": "[22] Gabriella Kazai, Emine Yilmaz, Nick Craswell, and S.M.M. Tahaghoghi. 2013. User Intent and Assessor Disagreement in Web Search Evaluation. In 22nd ACM International Conference on Information and Knowledge Management. San Francisco, California, 699-708." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.794, + 0.914, + 0.825 + ], + "angle": 0, + "content": "[23] Alina Leidinger, Robert van Rooij, and Ekaterina Shutova. 2023. The language of prompting: What linguistic properties make a prompt successful? arXiv:2311.01967 [cs.CL] https://arxiv.org/abs/2311.01967" + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.825, + 0.914, + 0.865 + ], + "angle": 0, + "content": "[24] Dawei Li, Bohan Jiang, Liangjie Huang, Alimohammad Beigi, Chengshuai Zhao, Zhen Tan, Amrita Bhattacharjee, Yuxuan Jiang, Canyu Chen, Tianhao Wu, et al. 2024. From Generation to Judgment: Opportunities and Challenges of LLM-as-a-judge. arXiv preprint arXiv:2411.16594 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.865, + 0.914, + 0.896 + ], + "angle": 0, + "content": "[25] Sheng Lu, Hendrik Schuff, and Iryna Gurevych. 2024. How are Prompts Different in Terms of Sensitivity? In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language" + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.109, + 0.915, + 0.896 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.275, + 0.087 + ], + "angle": 0, + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy" + }, + { + "type": "header", + "bbox": [ + 0.714, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "Negar Arabzadeh and Charles L.A. Clarke" + }, + { + "type": "ref_text", + "bbox": [ + 0.108, + 0.109, + 0.482, + 0.14 + ], + "angle": 0, + "content": "Technologies (Volume 1: Long Papers), Kevin Duh, Helena Gomez, and Steven Bethard (Eds.). Association for Computational Linguistics, Mexico City, Mexico, 5833-5856. doi:10.18653/v1/2024.nacl-long.325" + }, + { + "type": "ref_text", + "bbox": [ + 0.086, + 0.14, + 0.483, + 0.171 + ], + "angle": 0, + "content": "[26] Sean MacAvaney and Luca Soldaini. 2023. One-shot labeling for automatic relevance estimation. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2230-2235." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.171, + 0.482, + 0.211 + ], + "angle": 0, + "content": "[27] Chuan Meng, Negar Arabzadeh, Arian Askari, Mohammad Aliannejadi, and Maarten de Rijke. 2024. Query Performance Prediction using Relevance Judgments Generated by Large Language Models. arXiv preprint arXiv:2404.01012 (2024)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.211, + 0.482, + 0.261 + ], + "angle": 0, + "content": "[28] Hossein A Rahmani, Clemencia Siro, Mohammad Aliannejadi, Nick Craswell, Charles LA Clarke, Guglielmo Faggioli, Bhaskar Mitra, Paul Thomas, and Emine Yilmaz. 2024. Llm4eval: Large language model for evaluation in ir. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 3040-3043." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.261, + 0.482, + 0.312 + ], + "angle": 0, + "content": "[29] Hossein A. Rahmani, Clemencia Siro, Mohammad Aliannejadi, Nick Craswell, Charles L. A. Clarke, Guglielmo Faggioli, Bhaskar Mitra, Paul Thomas, and Emine Yilmaz. 2024. Report on the 1st Workshop on Large Language Model for Evaluation in Information Retrieval (LLM4Eval 2024) at SIGIR 2024. arXiv:2408.05388 [cs.IR] https://arxiv.org/abs/2408.05388" + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.312, + 0.482, + 0.342 + ], + "angle": 0, + "content": "[30] Amirhossein Razavi, Mina Soltangheis, Neger Arabzadeh, Sara Salamat, Morteza Zihayat, and Ebrahim Bagheri. 2025. Benchmarking Prompt Sensitivity in Large Language Models. arXiv preprint arXiv:2502.06065 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.342, + 0.482, + 0.372 + ], + "angle": 0, + "content": "[31] Tetsuya Sakai and Zhaohao Zeng. 2020. Good evaluation measures based on document preferences. In 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. 359-368." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.372, + 0.482, + 0.402 + ], + "angle": 0, + "content": "[32] Alireza Salemi and Hamed Zamani. 2024. Evaluating retrieval quality in retrieval-augmented generation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2395-2400." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.402, + 0.482, + 0.422 + ], + "angle": 0, + "content": "[33] David P Sander and Laura Dietz. 2021. EXAM: How to Evaluate Retrieve-and-Generate Systems for Users Who Do Not (Yet) Know What They Want.. In" + }, + { + "type": "list", + "bbox": [ + 0.086, + 0.109, + 0.483, + 0.422 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.543, + 0.11, + 0.631, + 0.12 + ], + "angle": 0, + "content": "DESIREs. 136-146." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.12, + 0.915, + 0.16 + ], + "angle": 0, + "content": "[34] Melanie Sclar, Yejin Choi, Yulia Tsvetkov, and Alane Suhr. 2023. Quantifying Language Models' Sensitivity to Spurious Features in Prompt Design or: How I learned to start worrying about prompt formatting. arXiv preprint arXiv:2310.11324 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.16, + 0.914, + 0.19 + ], + "angle": 0, + "content": "[35] Paul Thomas, Seth Spielman, Nick Craswell, and Bhaskar Mitra. 2023. Large Language Models Can Accurately Predict Searcher Preferences. arXiv preprint arXiv:2309.10621 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.19, + 0.914, + 0.231 + ], + "angle": 0, + "content": "[36] Shivani Upadhyay, Ronak Pradeep, Nandan Thakur, Daniel Campos, Nick Craswell, Ian Soboroff, Hoa Trang Dang, and Jimmy Lin. 2024. A Large-Scale Study of Relevance Assessments with Large Language Models: An Initial Look. 
arXiv:2411.08275 [cs.IR] https://arxiv.org/abs/2411.08275" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.231, + 0.914, + 0.261 + ], + "angle": 0, + "content": "[37] Shivani Upadhyay, Ronak Pradeep, Nandan Thakur, Nick Craswell, and Jimmy Lin. 2024. UMBRELA: Umbrela is the (Open-Source Reproduction of the) Bing RELevance Assessor. arXiv preprint arXiv:2406.06519 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.261, + 0.914, + 0.281 + ], + "angle": 0, + "content": "[38] Ellen M Voorhees. 2000. Report on trec-9. In ACM SIGIR Forum, Vol. 34. ACM New York, NY, USA, 1-8." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.281, + 0.914, + 0.322 + ], + "angle": 0, + "content": "[39] Xiaohui Xie, Jiaxin Mao, Yiqun Liu, Maarten de Rijke, Haitian Chen, Min Zhang, and Shaoping Ma. 2020. Preference-based evaluation metrics for web image search. In 43st Annual International ACM SIGIR Conference on Research and Development in Information Retrieval. Xi'an, China." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.322, + 0.914, + 0.362 + ], + "angle": 0, + "content": "[40] Xinyi Yan, Chengxi Luo, Charles L. A. Clarke, Nick Craswell, Ellen M. Voorhees, and Pablo Castells. 2022. Human Preferences as Dueling Bandits. In Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '22). ACM. doi:10.1145/3477495.3531991" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.362, + 0.914, + 0.403 + ], + "angle": 0, + "content": "[41] Honglei Zhuang, Zhen Qin, Kai Hui, Junru Wu, Le Yan, Xuanhui Wang, and Michael Berdersky. 2023. Beyond Yes and No: Improving Zero-Shot LLM Rankers via Scoring Fine-Grained Relevance Labels. arXiv preprint arXiv:2310.14122 (2023)." + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.11, + 0.915, + 0.403 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12408/afaa1baf-15c0-4d8f-949b-b6edf18db129_origin.pdf b/data/2025/2504_12xxx/2504.12408/afaa1baf-15c0-4d8f-949b-b6edf18db129_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..cb1f1122274c1e686a58236c23d5ed52c7755d8f --- /dev/null +++ b/data/2025/2504_12xxx/2504.12408/afaa1baf-15c0-4d8f-949b-b6edf18db129_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3f208d805bbd446763461ae2da8b04bccfd1360f61ba44f5421641aa3dc1a441 +size 575723 diff --git a/data/2025/2504_12xxx/2504.12408/full.md b/data/2025/2504_12xxx/2504.12408/full.md new file mode 100644 index 0000000000000000000000000000000000000000..446a4cc400a453d476ce7d3fcadd7a6ee4589f9e --- /dev/null +++ b/data/2025/2504_12xxx/2504.12408/full.md @@ -0,0 +1,181 @@ +# A Human-AI Comparative Analysis of Prompt Sensitivity in LLM-Based Relevance Judgment + +Negar Arabzadeh + +narabzad@uwaterloo.ca + +University of Waterloo + +Waterloo, Ontario, Canada + +Charles L.A. Clarke + +claclark@uwaterloo.ca + +University of Waterloo + +Waterloo, Ontario, Canada + +# Abstract + +Large Language Models (LLMs) are increasingly used to automate relevance judgments for information retrieval (IR) tasks, often demonstrating agreement with human labels that approaches interhuman agreement. To assess the robustness and reliability of LLM-based relevance judgments, we systematically investigate impact of prompt sensitivity on the task. We collected prompts for relevance assessment from 15 human experts and 15 LLMs across three tasks — binary, graded, and pairwise — yielding 90 prompts in total. 
After filtering out unusable prompts from three humans and three LLMs, we employed the remaining 72 prompts with three different LLMs as judges to label document/query pairs from two TREC Deep Learning datasets (2020 and 2021). We compare LLM-generated labels with the official TREC human labels using Cohen's $\kappa$ and pairwise agreement measures. In addition to investigating the impact of prompt variations on agreement with human labels, we compare human- and LLM-generated prompts and analyze differences among different LLMs as judges. We also compare human- and LLM-generated prompts with the standard UMBRELA prompt used for relevance assessment by Bing and the TREC 2024 Retrieval Augmented Generation (RAG) Track. To support future research in LLM-based evaluation, we release all data and prompts at https://github.com/Narabzad/prompt-sensitivity-relevance-judgements/.

# CCS Concepts

- Information systems $\rightarrow$ Evaluation of retrieval results; Relevance assessment; Test collections.

# Keywords

Large Language Models, Relevance Judgments, Evaluation

Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.

SIGIR '25, Padua, Italy

© 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM.

ACM ISBN 979-8-4007-1592-1/2025/07

https://doi.org/10.1145/3726302.3730159

# 1 Introduction

Large Language Models (LLMs) are increasingly used for evaluation across various domains, including natural language processing and automated content assessment [1, 4, 9, 11, 28, 32]. The information retrieval (IR) community has been an early adopter of LLMs for relevance assessment [19, 24, 27, 35, 41]. Numerous studies have confirmed that LLM-generated relevance labels closely align with human labels under multiple measures of agreement [26, 36, 37].

Nonetheless, despite the widespread adoption of LLMs for relevance assessment, prompting strategies vary substantially across studies [2, 3, 20, 33]. An experiment reported at the LLM4Eval Workshop on Large Language Models for Evaluation in Information Retrieval at SIGIR 2024 [29] analyzed how different prompts influence agreement with human judgments and system rankings [28]. While multiple studies have examined how LLMs respond to different prompting strategies [5, 10, 23, 25, 34], these studies have generally been conducted with prompts tuned to specific LLMs and collections, or where prompt variants are constrained by templates [6]. As a complement to these studies, we report on a study of prompts from a variety of independent sources that have not been tuned to particular LLMs or collections, allowing us to examine the robustness of LLM-based relevance assessment under different prompting strategies. This investigation also allows us to compare different LLMs as judges, to determine the degree to which each is sensitive to prompt modifications.

We collected and analyzed prompts generated by both human experts and LLMs themselves.
We designed a guideline for prompting LLMs to perform relevance assessment following three different approaches: binary, graded, and pairwise. While most previous studies have focused on graded relevance, we believe it is crucial to explore a wider range of relevance assessment methods, as they have proven effective in different scenarios in the evaluation of information-seeking systems [7, 8, 13-15, 21, 22, 31, 38-40]. A benefit of employing LLMs for relevance assessment is that it becomes easier to explore different assessment approaches, since human judges do not need to be recruited and trained separately for each approach.

We recruited 15 human participants to create prompts for each of the three assessment approaches. As part of the recruitment process, we ensured that the participants were familiar with prompt engineering and relevance assessment principles, as detailed in Section 2. As a result of these inclusion criteria, most participants were drawn from three academic NLP/IR labs. We also collected prompts from 15 different open-source and commercial LLMs. Our primary goal is to understand prompt sensitivity in LLM-based relevance judgment [30], including its impact, robustness, and variation across different LLMs. Additionally, we explore the effectiveness of LLMs as prompt generators.

We performed relevance judgment experiments using data from two years of the TREC Deep Learning Track: DL 2020 [16] and DL 2021 [17]. Using the prompts created by both human participants and LLMs, we conducted relevance assessments on query-document pairs from these datasets using two open-source LLMs (LLaMA 3.2-3B and Mistral 7B) and one commercial LLM (GPT-4o). Our experiment incorporates the three approaches to relevance assessment (binary, graded, and pairwise) with prompts from both humans and LLMs, using three different LLMs as judges. Through our experiments, we address the following research questions:

- RQ1. Impact of Prompts on LLM-based Relevance Judgment Approaches: Given a clear task objective, how do different prompts influence the effectiveness of each approach to LLM-based relevance judgment?
- RQ2. LLMs as Prompt Generators: How effective are LLM-generated prompts for relevance judgment, and how do they compare to human-crafted prompts?
- RQ3. Prompt Robustness Across LLMs: Are there prompts that consistently perform well across different LLMs, regardless of the model used as a judge?
- RQ4. Model-Specific Sensitivity to Prompts: Is prompt sensitivity consistent across all models, or do some LLMs show greater variability in performance?

To ensure reproducibility, we have made all data and experimental artifacts publicly available at https://github.com/Narabzad/prompt-sensitivity-relevance-judgements/. The study reported in this paper, and its associated data release, has received ethics clearance as human subjects research from our institution.

# 2 Prompt Creation

# 2.1 Prompt generation

To investigate the impact of prompting on LLM-based relevance judgment, we collected data from both human participants and LLMs, ensuring that the task objective remained clear and consistent (sharing the same intent) across all participants. We prepared guidelines for prompt writing¹, which provide detailed explanations of the three relevance judgment tasks: 1) Binary relevance — a passage is either relevant (1) or not relevant (0) to a query.
2) Graded relevance — a passage is rated on a 0-3 scale, where 3 indicates perfect relevance to the query. 3) Pairwise relevance — given two passages, choose the passage more relevant to the query. In the guideline, each task is illustrated with examples from the TREC Deep Learning Track 2019 [18], helping to ensure that both humans and LLMs had a well-defined understanding of the task. These examples could also be used as (few-shot) examples if desired.

The guidelines specify a Python-based format, where participants (both humans and LLMs) were required to fill in structured Python dictionaries. More specifically, participants had to provide both the "system message" and "user message" fields for the prompts, following the format commonly used in LLM-based prompting (e.g., OpenAI models and open-source alternatives such as those from Ollama). This structured approach ensures compatibility across different LLM implementations; a minimal sketch of the format appears after Table 1.

We recruited 15 human participants, each of whom held at least a Master's degree in computer science, was fluent in English, and had prior experience working with LLMs via API usage or coding. Additionally, these participants had previously published at least one paper in an IR-focused conference. Each participant received a $10 gift card as a token of appreciation for their time and effort.

Table 1: List of LLMs used for prompt generation.
|  |  |  |  |  |
| --- | --- | --- | --- | --- |
| GPT-4o | GPT-4o Mini | Claude 3.5 | LLaMA 3.2 | Phi-4 |
| Mistral-large | DeepSeek-v3 | Amazon-Nova-Pro-v1 | Gemma-2-9b | Grok-2 |
| Gemini 2 | Jamba-1.5 | Athene-v2 | GPT o1 | GPT o1 Mini |
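
To make the collection format concrete, the following minimal sketch shows the two-field prompt structure described in Section 2.1 and a judging call with temperature 0 as used in Section 3. It assumes the OpenAI Python SDK; the instruction text and the helper name `judge_binary` are illustrative placeholders, not one of the prompts collected in this study.

```python
# Minimal sketch (not an actual study prompt): each collected prompt is a pair
# of "system message" and "user message" strings, shown here for the binary task.
from openai import OpenAI

# Hypothetical example prompt; {query} and {passage} are template slots.
binary_prompt = {
    "system_message": "You are an expert relevance assessor.",
    "user_message": (
        "Query: {query}\n"
        "Passage: {passage}\n"
        "Answer 1 if the passage is relevant to the query and 0 otherwise. "
        "Return only the number."
    ),
}

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

def judge_binary(prompt: dict, query: str, passage: str) -> str:
    """Run one binary relevance judgment with deterministic decoding."""
    response = client.chat.completions.create(
        model="gpt-4o",
        temperature=0,  # temperature 0, matching the experimental setup
        messages=[
            {"role": "system", "content": prompt["system_message"]},
            {"role": "user", "content": prompt["user_message"].format(
                query=query, passage=passage)},
        ],
    )
    # Raw outputs may still need cleaning (see Section 2.2), e.g. verbose answers.
    return response.choices[0].message.content.strip()
```

Graded and pairwise prompts follow the same two-field structure, differing only in the instruction and the expected output (a 0-3 grade or a choice between two passages).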
For prompt creation, we also used 15 different LLMs from the ChatBotArena² platform [12], which enables the execution of various LLMs online. We provided the same data collection guideline to the LLMs, including the task description and examples, ensuring that the LLMs received instructions identical to those given to human participants. Like the human participants, each LLM was asked to complete the "system message" and "user message" fields in our Python function for relevance judgment. This setup allows us to systematically compare the impact of prompting across both groups. Table 1 provides the list of LLMs we used in this experiment for generating prompts for relevance judgments.

# 2.2 Filtering and cleaning

To maintain consistency, we did not modify the prompts or provide additional instructions to any LLMs or human participants. Among the LLMs, two failed to complete the task because they deemed the task inappropriate or repeatedly asked about examples. Among human participants, only one used a few-shot approach with examples; the rest did not provide any examples in their prompts. When testing the outputs of the collected prompts, not all of them generated the expected format cleanly. Some prompts produced responses that required additional cleaning, such as verbose outputs like "The passage is relevant, so the answer is: 1" instead of simply returning 1. To ensure consistency, we examined all generated outputs and applied the necessary cleaning. After filtering and cleaning, we finalized 12 human-generated prompts and 12 LLM-generated prompts for use in our experiments.

# 2.3 Prompt Diversity

To better understand the variation in prompts, we examined the diversity of both human-generated and LLM-generated prompts. Specifically, we analyzed user prompts and system prompts separately, as they serve distinct roles in guiding the LLM's response. In a prompt, the user message provides the direct instructions given to the model, specifying what information is needed. In contrast, the system message provides context for the task, defining the LLM's role and expected behavior (e.g., "You are an expert relevance judgment assessor"). Figure 1 illustrates the distribution of unique terms used across all human-generated (in green) and LLM-generated (in red) prompts. As shown in this figure, human-generated prompts exhibit greater diversity in wording than LLM-generated ones. This suggests that humans introduce more nuanced descriptions and varied phrasing when defining the task, while LLM-generated system prompts tend to rely on more standardized language. Additionally, system messages exhibit greater lexical diversity than user messages.

![](images/1fc35f1fb4afa7dc61ae8b75f2e5a0925dc916b91e1fc9063dd02801e0432c0d.jpg)
Figure 1: Diversity of words across human and LLM-generated prompts.

# 3 Experimental Methodology

Data. We utilize the TREC Deep Learning Track datasets from 2020 and 2021. The DL-20 dataset contains 54 judged queries with 11,386 relevance assessments from the MS MARCO V1 collection, while the DL-21 dataset includes 53 judged queries and 10,828 assessments from MS MARCO V2. Both datasets have been manually annotated by NIST assessors following the TREC relevance judgment guidelines. The assessors evaluate each document-query pair on a graded relevance scale, ranging from not relevant (0) to highly relevant (3).
The assessment process involved pooling top-ranked documents from multiple retrieval systems, which were then judged by human annotators. Using this data allows us to compare the three different variations of LLM-based judgment, i.e., binary, graded, and pairwise. For graded relevance, we compare against the actual graded labels. For binary judgments, following prior work [19, 37], we classify levels 2 and 3 as relevant and levels 0 and 1 as non-relevant. For pairwise judgments, we compare documents with different relevance levels, assuming that a document with a higher relevance level should be ranked as more relevant than one with a lower relevance level.

LLMs for Relevance Judgments. To perform relevance assessment, we employed three different LLMs: one commercial model, GPT-4o, and two open-source models, LLaMA 3.2-3B and Mistral-7B. We implemented our experiments using OpenAI and Ollama, running all prompts with a temperature setting of 0.

Data Sampling. We conducted experiments on all query-document pairs for binary and graded relevance judgments using the open-source models. However, due to computational constraints, we were unable to run all 24 valid prompts across all query-document pairs for GPT-4o. Instead, we randomly sampled up to 10 documents per query for each of the four relevance levels (0-3). If fewer than 10 documents were available for a given relevance level, we included all available documents. For pairwise judgments, evaluating all possible pairs was not feasible due to their quadratic growth. Instead, we categorized documents for each query into three groups: "highly relevant", "relevant", and "non-relevant". The "highly relevant" category corresponds to the highest available relevance level for that query, which in TREC-style annotations could be level 3 or level 2, depending on availability. The "non-relevant" category includes all level 0 documents, while any intermediate relevance level (typically level 1, or levels 1 and 2 if level 3 exists) was classified as "relevant".

Table 2: Mean and variance of agreement between LLM-based and human relevance judgments across different settings.
| Model | Crafted by | Binary mean | Binary var. | Graded mean | Graded var. | Pairwise mean | Pairwise var. |
| --- | --- | --- | --- | --- | --- | --- | --- |
| GPT-4o | LLM | 0.434 | 0.003 | 0.215 | 0.001 | 0.849 | 0.000 |
| GPT-4o | Human | 0.270 | 0.098 | 0.215 | 0.001 | 0.578 | 0.139 |
| LLaMA 3.2 | LLM | 0.303 | 0.010 | 0.033 | 0.002 | 0.439 | 0.066 |
| LLaMA 3.2 | Human | 0.167 | 0.041 | 0.102 | 0.003 | 0.330 | 0.073 |
| Mistral | LLM | 0.405 | 0.001 | 0.008 | 0.004 | 0.574 | 0.014 |
| Mistral | Human | 0.243 | 0.051 | 0.004 | 0.005 | 0.442 | 0.073 |

From these three categories, we constructed document pairs for pairwise judgments. Specifically, we sampled 10 pairs per query from each of the following comparisons: "highly relevant vs. non-relevant", "relevant vs. non-relevant", and "highly relevant vs. relevant" (up to 30 pairs in total). If fewer than 10 pairs were available for a given comparison, we included as many as possible. Additionally, for the pairwise setting, we minimized positional bias by evaluating each document pair twice, swapping the order of the documents in the second run. The result is counted as "agree" if the LLM favors the more relevant passage in both comparisons, "tie" if the LLM's decisions are inconsistent when the passage order is swapped, and "disagree" if the LLM consistently selects the passage with the lower relevance level assigned by human annotators.

# 4 Results and Findings

To explore the research questions raised in the introduction, we investigated the agreement of LLM-based relevance judgments from different prompts with human annotations on TREC DL 2020 and 2021 using three different LLMs, as shown in Figure 2. For binary and graded relevance judgments, agreement is measured using Cohen's kappa ($\kappa$). For pairwise judgments, since the task involves assessing agreement with the expected ordering of pairs, we report the percentage of cases where the LLM's preference agrees with the expected order. In this figure, the leftmost two columns represent the results for binary, the middle two columns correspond to graded, and the rightmost two columns display the results for pairwise relevance judgment. The green, blue, and red bars indicate agreement for GPT-4o, LLaMA 3.2, and Mistral, respectively. In each pair of plots, the left plot presents results for DL-20, while the right plot corresponds to DL-21. The bottom 12 bars represent prompts crafted by LLMs; the 12 bars above them correspond to prompts created by humans.

In addition to results from the human- and LLM-written prompts, we also report the results of UMBRELA assessments at the top of the graded relevance sub-figure (middle). UMBRELA is an open-source reproduction of Microsoft's Bing LLM-based relevance assessor [35], designed to automate relevance judgments effectively [36, 37]. It follows a structured prompting approach and has demonstrated high correlation with both human annotations and system rankings across multiple TREC Deep Learning Tracks (2019-2023). Notably, UMBRELA has been integrated into the TREC 2024 RAG Track for automated evaluation, which further validated its reliability as an alternative to human assessors. We consider UMBRELA a reliable and effective prompt, and we believe comparing its performance against human-crafted and LLM-generated prompts in graded relevance judgments brings interesting insights. Additionally, Table 2 summarizes Figure 2 by providing the mean and variance of agreement scores across the two datasets and the different relevance judgment tasks.

![](images/6a0acc0eba950fda3479939265810549aec4da0f6fa29aaac59d68a2f27cbb35.jpg)
Figure 2: Agreement of LLM-based relevance judgments with human annotations across different prompts and relevance judgment tasks. UMBRELA represents the reproduction of Bing's LLM assessor introduced in [37]. Otherwise, the top 12 bars ($\mathbf{H}^{*}$) represent human-crafted prompts, while the bottom 12 correspond to LLM-generated prompts. The dashed lines show the mean agreement for LLM-crafted and human-crafted prompts separately.
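
As a concrete illustration of these agreement measures, the sketch below computes Cohen's $\kappa$ for binary or graded labels and the swap-based agree/tie/disagree outcome for pairwise judgments. It assumes scikit-learn is available and that model outputs have already been cleaned to plain labels (Section 2.2); the function names are illustrative, not part of the released code.

```python
# Sketch of the agreement measures in Section 4 (assumes scikit-learn).
from sklearn.metrics import cohen_kappa_score

def label_agreement(llm_labels, human_labels):
    """Cohen's kappa between LLM labels and TREC human labels
    (binary or graded)."""
    return cohen_kappa_score(llm_labels, human_labels)

def pairwise_outcome(prefers_more_relevant_run1: bool,
                     prefers_more_relevant_run2: bool) -> str:
    """Positional-bias control: each pair is judged twice with the passage
    order swapped. Each argument records whether the LLM preferred the
    passage with the higher human relevance level in that run."""
    if prefers_more_relevant_run1 and prefers_more_relevant_run2:
        return "agree"     # favors the more relevant passage both times
    if not prefers_more_relevant_run1 and not prefers_more_relevant_run2:
        return "disagree"  # consistently favors the less relevant passage
    return "tie"           # inconsistent under the order swap

def pairwise_agreement(outcomes):
    """Fraction of pairs where the LLM agrees with the expected order."""
    return sum(o == "agree" for o in outcomes) / len(outcomes)
```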

We now examine each of our research questions in light of these agreement results.

RQ1. Impact of Prompts on LLM-based Relevance Judgment Approaches: Figure 2 and Table 2 reveal significant variance across the different LLM-based relevance judgment approaches. Binary and pairwise methods exhibit the least sensitivity to input prompts, maintaining more consistent agreement. In contrast, graded relevance judgments are highly sensitive to prompt variations. We note that while binary and pairwise methods operate with only two choices, graded relevance introduces greater variability. On graded judgments in particular, GPT-4o demonstrates relatively stable performance, but LLaMA 3.2 and Mistral show considerable fluctuations across different prompts.

RQ2. LLMs as Prompt Generators: Table 2 shows that LLM-generated prompts generally yield higher average agreement with human annotations. However, for graded relevance judgments, the difference is minimal. This may be due to (i) participants' greater familiarity with graded assessments or (ii) the inherently subjective nature of assigning relevance levels, which may require more calibration with human annotators. Additionally, LLM-generated prompts exhibit lower variance in agreement compared to human-crafted prompts, indicating less sensitivity to prompt variations.

RQ3. Prompt Robustness Across LLMs: Figure 3 analyzes inter-agreement rates among different prompt groups using Krippendorff's alpha. Here we measure agreement between the outputs of different prompts, regardless of their alignment with human judgments. The results show that LLM-generated prompts exhibit higher inter-agreement than human-crafted ones, likely due to the greater linguistic diversity in human-generated prompts, as seen in Figure 1. This suggests that LLM-generated prompts are more robust than human-crafted ones. While some human-crafted prompts performed well across all models, prompt effectiveness varies significantly between LLMs, with no single prompt consistently excelling across all models. However, for graded assessments, UMBRELA consistently demonstrated high performance across different LLMs, emerging as one of the most effective prompts for every model. UMBRELA had previously shown strong correlation with human judgments on the TREC DL tracks [37]. We hypothesize that UMBRELA's strong and consistent performance may stem from how its prompt deconstructs the concept of relevance into finer-grained aspects, such as trustworthiness and alignment with intent. This structured approach likely prevents the LLM from relying on its own interpretation of relevance.

![](images/32d199718463392947d076c7e80ed01b8a647bf3a5e940ced5752b21d5a01dc2.jpg)
Figure 3: Krippendorff's inter-agreement rate between all the prompts on the two datasets.

RQ4. Model-Specific Sensitivity to Prompts: From Figure 2, we observe that GPT-4o demonstrates high consistency across most prompts and all relevance assessment approaches. In contrast, the performance of LLaMA 3.2 and Mistral varies significantly depending on the prompt and assessment method. This variability is further confirmed by the variance of agreement reported in Table 2. Notably, GPT-4o exhibits consistently low variance in agreement, particularly when prompted with LLM-crafted prompts.

# 5 Conclusion and Limitations

In this study, we investigated the sensitivity of LLM-based relevance judgments to different prompting strategies across multiple models.
We examined how prompts, whether human- or LLM-generated, influence judgment effectiveness, how robust they are across different LLMs, and the extent to which models exhibit variability in response to prompt modifications. One specific outcome is confirmation of UMBRELA as a leading prompt for LLM-based graded relevance assessment. Despite these contributions, our study has limitations. Our human participants primarily had a computer science background with experience writing prompts for LLMs. Additionally, we evaluated only three LLMs as judges, limiting the generalizability of our findings.

# References

[1] Marwah Alaofi, Negar Arabzadeh, Charles LA Clarke, and Mark Sanderson. 2024. Generative information retrieval evaluation. In Information Access in the Era of Generative AI. Springer, 135-159.
[2] Negar Arabzadeh, Amin Bigdeli, and Charles L. A. Clarke. 2024. Adapting Standard Retrieval Benchmarks to Evaluate Generated Answers. In 46th European Conference on Information Retrieval. Glasgow, Scotland.
[3] Negar Arabzadeh and Charles LA Clarke. 2024. A Comparison of Methods for Evaluating Generative IR. arXiv preprint arXiv:2404.04044 (2024).
[4] Negar Arabzadeh, Siqing Huo, Nikhil Mehta, Qingyun Wu, Chi Wang, Ahmed Hassan Awadallah, Charles L. A. Clarke, and Julia Kiseleva. 2024. Assessing and Verifying Task Utility in LLM-Powered Applications. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (Eds.). Association for Computational Linguistics, Miami, Florida, USA, 21868-21888. doi:10.18653/v1/2024.emnlp-main.1219
[5] Simran Arora, Avanika Narayan, Mayee F. Chen, Laurel Orr, Neel Guha, Kush Bhatia, Ines Chami, Frederic Sala, and Christopher Re. 2022. Ask Me Anything: A simple strategy for prompting language models. arXiv:2210.02441 [cs.CL] https://arxiv.org/abs/2210.02441
[6] Leif Azzopardi, Charles LA Clarke, Paul Kantor, Bhaskar Mitra, Johanne R Trippas, Zhaochun Ren, Mohammad Aliannejadi, Negar Arabzadeh, Raman Chandrasekar, Maarten de Rijke, et al. 2024. Report on The Search Futures Workshop at ECIR 2024. In ACM SIGIR Forum, Vol. 58. ACM New York, NY, USA, 1-41.
[7] Chris Buckley and Ellen M Voorhees. 2004. Retrieval evaluation with incomplete information. In Proceedings of the 27th annual international ACM SIGIR conference on Research and development in information retrieval. 25-32.
[8] Ben Carterette, Paul N. Bennett, David Maxwell Chickering, and Susan T. Dumais. 2008. Here or there: Preference judgments for Relevance. Computer Science Department Faculty Publication Series 46. University of Massachusetts Amherst.
[9] Yupeng Chang, Xu Wang, Jindong Wang, Yuan Wu, Linyi Yang, Kaijie Zhu, Hao Chen, Xiaoyuan Yi, Cunxiang Wang, Yidong Wang, et al. 2024. A survey on evaluation of large language models. ACM Transactions on Intelligent Systems and Technology 15, 3 (2024), 1-45.
[10] Anwoy Chatterjee, HSVNS Kowndinya Renduchintala, Sumit Bhatia, and Tanmoy Chakraborty. 2024. POSIX: A Prompt Sensitivity Index For Large Language Models. arXiv preprint arXiv:2410.02185 (2024).
[11] Cheng-Han Chiang and Hung-yi Lee. 2023. Can large language models be an alternative to human evaluations? arXiv preprint arXiv:2305.01937 (2023).
[12] Wei-Lin Chiang, Lianmin Zheng, Ying Sheng, Anastasios Nikolas Angelopoulos, Tianle Li, Dacheng Li, Hao Zhang, Banghua Zhu, Michael Jordan, Joseph E Gonzalez, et al. 2024. Chatbot arena: An open platform for evaluating LLMs by human preference. arXiv preprint arXiv:2403.04132 (2024).
[13] Charles L. A. Clarke, Alexandra Vtyurina, and Mark D. Smucker. 2021. Assessing Top-$k$ Preferences. ACM Trans. Inf. Syst. 39, 3, Article 33 (May 2021), 21 pages. doi:10.1145/3451161
[14] Charles L. A. Clarke, Alexandra Vtyurina, and Mark D. Smucker. 2021. Assessing top-$k$ preferences. ACM Transactions on Information Systems 39, 3 (July 2021).
[15] Cyril W Cleverdon. 1991. The significance of the Cranfield tests on index languages. In Proceedings of the 14th annual international ACM SIGIR conference on Research and development in information retrieval. 3-12.
[16] Nick Craswell, Bhaskar Mitra, Emine Yilmaz, and Daniel Campos. 2021. Overview of the TREC 2020 deep learning track. arXiv:2102.07662 [cs.IR] https://arxiv.org/abs/2102.07662
[17] Nick Craswell, Bhaskar Mitra, Emine Yilmaz, Daniel Campos, and Jimmy Lin. 2022. Overview of the TREC 2021 deep learning track. In Text REtrieval Conference (TREC). NIST, TREC. https://www.microsoft.com/en-us/research/publication/overview-of-the-trec-2021-deep-learning-track/
[18] Nick Craswell, Bhaskar Mitra, Emine Yilmaz, Daniel Campos, and Ellen M Voorhees. 2020. Overview of the TREC 2019 deep learning track. arXiv preprint arXiv:2003.07820 (2020).
[19] Guglielmo Faggioli, Laura Dietz, Charles LA Clarke, Gianluca Demartini, Matthias Hagen, Claudia Hauff, Noriko Kando, Evangelos Kanoulas, Martin Potthast, Benno Stein, et al. 2023. Perspectives on large language models for relevance judgment. In Proceedings of the 2023 ACM SIGIR International Conference on Theory of Information Retrieval. 39-50.
[20] Naghmeh Farzi and Laura Dietz. 2024. Pencils down! automatic rubric-based evaluation of retrieve/generate systems. In Proceedings of the 2024 ACM SIGIR International Conference on Theory of Information Retrieval. 175-184.
[21] David Hawking, Ellen Voorhees, Nick Craswell, Peter Bailey, et al. 1999. Overview of the TREC-8 web track. In TREC.
[22] Gabriella Kazai, Emine Yilmaz, Nick Craswell, and S.M.M. Tahaghoghi. 2013. User Intent and Assessor Disagreement in Web Search Evaluation. In 22nd ACM International Conference on Information and Knowledge Management. San Francisco, California, 699-708.
[23] Alina Leidinger, Robert van Rooij, and Ekaterina Shutova. 2023. The language of prompting: What linguistic properties make a prompt successful? arXiv:2311.01967 [cs.CL] https://arxiv.org/abs/2311.01967
[24] Dawei Li, Bohan Jiang, Liangjie Huang, Alimohammad Beigi, Chengshuai Zhao, Zhen Tan, Amrita Bhattacharjee, Yuxuan Jiang, Canyu Chen, Tianhao Wu, et al. 2024. From Generation to Judgment: Opportunities and Challenges of LLM-as-a-judge. arXiv preprint arXiv:2411.16594 (2024).
[25] Sheng Lu, Hendrik Schuff, and Iryna Gurevych. 2024. How are Prompts Different in Terms of Sensitivity? In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), Kevin Duh, Helena Gomez, and Steven Bethard (Eds.). Association for Computational Linguistics, Mexico City, Mexico, 5833-5856. doi:10.18653/v1/2024.naacl-long.325
[26] Sean MacAvaney and Luca Soldaini. 2023. One-shot labeling for automatic relevance estimation. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2230-2235.
[27] Chuan Meng, Negar Arabzadeh, Arian Askari, Mohammad Aliannejadi, and Maarten de Rijke. 2024. Query Performance Prediction using Relevance Judgments Generated by Large Language Models. arXiv preprint arXiv:2404.01012 (2024).
[28] Hossein A Rahmani, Clemencia Siro, Mohammad Aliannejadi, Nick Craswell, Charles LA Clarke, Guglielmo Faggioli, Bhaskar Mitra, Paul Thomas, and Emine Yilmaz. 2024. LLM4Eval: Large language model for evaluation in IR. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 3040-3043.
[29] Hossein A. Rahmani, Clemencia Siro, Mohammad Aliannejadi, Nick Craswell, Charles L. A. Clarke, Guglielmo Faggioli, Bhaskar Mitra, Paul Thomas, and Emine Yilmaz. 2024. Report on the 1st Workshop on Large Language Model for Evaluation in Information Retrieval (LLM4Eval 2024) at SIGIR 2024. arXiv:2408.05388 [cs.IR] https://arxiv.org/abs/2408.05388
[30] Amirhossein Razavi, Mina Soltangheis, Negar Arabzadeh, Sara Salamat, Morteza Zihayat, and Ebrahim Bagheri. 2025. Benchmarking Prompt Sensitivity in Large Language Models. arXiv preprint arXiv:2502.06065 (2025).
[31] Tetsuya Sakai and Zhaohao Zeng. 2020. Good evaluation measures based on document preferences. In 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. 359-368.
[32] Alireza Salemi and Hamed Zamani. 2024. Evaluating retrieval quality in retrieval-augmented generation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2395-2400.
[33] David P Sander and Laura Dietz. 2021. EXAM: How to Evaluate Retrieve-and-Generate Systems for Users Who Do Not (Yet) Know What They Want. In DESIRES. 136-146.
[34] Melanie Sclar, Yejin Choi, Yulia Tsvetkov, and Alane Suhr. 2023. Quantifying Language Models' Sensitivity to Spurious Features in Prompt Design or: How I learned to start worrying about prompt formatting. arXiv preprint arXiv:2310.11324 (2023).
[35] Paul Thomas, Seth Spielman, Nick Craswell, and Bhaskar Mitra. 2023. Large Language Models Can Accurately Predict Searcher Preferences. arXiv preprint arXiv:2309.10621 (2023).
[36] Shivani Upadhyay, Ronak Pradeep, Nandan Thakur, Daniel Campos, Nick Craswell, Ian Soboroff, Hoa Trang Dang, and Jimmy Lin. 2024. A Large-Scale Study of Relevance Assessments with Large Language Models: An Initial Look. arXiv:2411.08275 [cs.IR] https://arxiv.org/abs/2411.08275
[37] Shivani Upadhyay, Ronak Pradeep, Nandan Thakur, Nick Craswell, and Jimmy Lin. 2024. UMBRELA: Umbrela is the (Open-Source Reproduction of the) Bing RELevance Assessor. arXiv preprint arXiv:2406.06519 (2024).
[38] Ellen M Voorhees. 2000. Report on TREC-9. In ACM SIGIR Forum, Vol. 34. ACM New York, NY, USA, 1-8.
[39] Xiaohui Xie, Jiaxin Mao, Yiqun Liu, Maarten de Rijke, Haitian Chen, Min Zhang, and Shaoping Ma. 2020. Preference-based evaluation metrics for web image search. In 43rd Annual International ACM SIGIR Conference on Research and Development in Information Retrieval. Xi'an, China.
[40] Xinyi Yan, Chengxi Luo, Charles L. A. Clarke, Nick Craswell, Ellen M. Voorhees, and Pablo Castells. 2022. Human Preferences as Dueling Bandits. In Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '22). ACM. doi:10.1145/3477495.3531991
[41] Honglei Zhuang, Zhen Qin, Kai Hui, Junru Wu, Le Yan, Xuanhui Wang, and Michael Bendersky. 2023. Beyond Yes and No: Improving Zero-Shot LLM Rankers via Scoring Fine-Grained Relevance Labels. arXiv preprint arXiv:2310.14122 (2023).
\ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12408/images/14ed2005b76ba13d7e932f558d55c0b027ef2666ca965213ba06bad4416456ce.jpg b/data/2025/2504_12xxx/2504.12408/images/14ed2005b76ba13d7e932f558d55c0b027ef2666ca965213ba06bad4416456ce.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4ec179ae1a49cffc9e6cd7fb8aea764d5d1c8c07 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12408/images/14ed2005b76ba13d7e932f558d55c0b027ef2666ca965213ba06bad4416456ce.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:896c03466efaad877609c7584831460f283ab9c7a1ff08cc15d53b8a8ff83e8b +size 38688 diff --git a/data/2025/2504_12xxx/2504.12408/images/1fc35f1fb4afa7dc61ae8b75f2e5a0925dc916b91e1fc9063dd02801e0432c0d.jpg b/data/2025/2504_12xxx/2504.12408/images/1fc35f1fb4afa7dc61ae8b75f2e5a0925dc916b91e1fc9063dd02801e0432c0d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..15db84414ec3c3312ea583701fa382252c8c9f5d --- /dev/null +++ b/data/2025/2504_12xxx/2504.12408/images/1fc35f1fb4afa7dc61ae8b75f2e5a0925dc916b91e1fc9063dd02801e0432c0d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:56023079093f753b8dd80ac3ea00e112f55fbc39f632c80068de31b64f0357b7 +size 26001 diff --git a/data/2025/2504_12xxx/2504.12408/images/32d199718463392947d076c7e80ed01b8a647bf3a5e940ced5752b21d5a01dc2.jpg b/data/2025/2504_12xxx/2504.12408/images/32d199718463392947d076c7e80ed01b8a647bf3a5e940ced5752b21d5a01dc2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..508279d8d2bb952bee4bdf05a3a2cddc12e25940 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12408/images/32d199718463392947d076c7e80ed01b8a647bf3a5e940ced5752b21d5a01dc2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09502d510a73eeee3b392105a5778cac4fe5f55a0ebe3694891f758d0e720209 +size 27607 diff --git a/data/2025/2504_12xxx/2504.12408/images/5b718910606f79a68ac8219463a79dfb50a2541cf96d49063477e0e1ff84af54.jpg b/data/2025/2504_12xxx/2504.12408/images/5b718910606f79a68ac8219463a79dfb50a2541cf96d49063477e0e1ff84af54.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6fe11981c5efdc435b4b24045141c4d9211bcc21 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12408/images/5b718910606f79a68ac8219463a79dfb50a2541cf96d49063477e0e1ff84af54.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:138c3754eaea783b5dfd0e4ad1b9310b08772aa9b2f28cfde2bff0e3cbcb59be +size 14714 diff --git a/data/2025/2504_12xxx/2504.12408/images/6a0acc0eba950fda3479939265810549aec4da0f6fa29aaac59d68a2f27cbb35.jpg b/data/2025/2504_12xxx/2504.12408/images/6a0acc0eba950fda3479939265810549aec4da0f6fa29aaac59d68a2f27cbb35.jpg new file mode 100644 index 0000000000000000000000000000000000000000..56806041d87d93f85c2a967c0897c6a1c8d4d37a --- /dev/null +++ b/data/2025/2504_12xxx/2504.12408/images/6a0acc0eba950fda3479939265810549aec4da0f6fa29aaac59d68a2f27cbb35.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:47c9b5d333b25ed7b37aaee0edb79572d6e4bb82488fb82b7fdadc376cba1913 +size 299837 diff --git a/data/2025/2504_12xxx/2504.12408/layout.json b/data/2025/2504_12xxx/2504.12408/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..e55bdf3736dab39fe0d09815360b389e84e0c8bb --- /dev/null +++ b/data/2025/2504_12xxx/2504.12408/layout.json @@ -0,0 +1,4529 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 72, + 79, + 538, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 72, + 79, + 538, + 120 + ], + "spans": [ + { + "bbox": [ + 72, + 79, + 538, + 120 + ], + "type": "text", + "content": "A Human-AI Comparative Analysis of Prompt Sensitivity in LLM-Based Relevance Judgment" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 138, + 129, + 227, + 141 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 129, + 227, + 141 + ], + "spans": [ + { + "bbox": [ + 138, + 129, + 227, + 141 + ], + "type": "text", + "content": "Negar Arabzadeh" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 133, + 143, + 232, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 143, + 232, + 152 + ], + "spans": [ + { + "bbox": [ + 133, + 143, + 232, + 152 + ], + "type": "text", + "content": "narabzad@uwaterloo.ca" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 135, + 155, + 230, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 155, + 230, + 165 + ], + "spans": [ + { + "bbox": [ + 135, + 155, + 230, + 165 + ], + "type": "text", + "content": "University of Waterloo" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 127, + 167, + 237, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 167, + 237, + 177 + ], + "spans": [ + { + "bbox": [ + 127, + 167, + 237, + 177 + ], + "type": "text", + "content": "Waterloo, Ontario, Canada" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 380, + 129, + 477, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 380, + 129, + 477, + 140 + ], + "spans": [ + { + "bbox": [ + 380, + 129, + 477, + 140 + ], + "type": "text", + "content": "Charles L.A. Clarke" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 381, + 142, + 476, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 381, + 142, + 476, + 152 + ], + "spans": [ + { + "bbox": [ + 381, + 142, + 476, + 152 + ], + "type": "text", + "content": "claclark@uwaterloo.ca" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 381, + 154, + 476, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 381, + 154, + 476, + 165 + ], + "spans": [ + { + "bbox": [ + 381, + 154, + 476, + 165 + ], + "type": "text", + "content": "University of Waterloo" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 373, + 167, + 484, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 167, + 484, + 177 + ], + "spans": [ + { + "bbox": [ + 373, + 167, + 484, + 177 + ], + "type": "text", + "content": "Waterloo, Ontario, Canada" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 184, + 96, + 195 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 184, + 96, + 195 + ], + "spans": [ + { + "bbox": [ + 51, + 184, + 96, + 195 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 198, + 295, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 198, + 295, + 441 + ], + "spans": [ + { + "bbox": [ + 50, + 198, + 295, + 441 + ], + "type": "text", + "content": "Large Language Models (LLMs) are increasingly used to automate relevance judgments for information retrieval (IR) tasks, often demonstrating agreement with human labels that approaches interhuman agreement. To assess the robustness and reliability of LLM-based relevance judgments, we systematically investigate impact of prompt sensitivity on the task. 
We collected prompts for relevance assessment from 15 human experts and 15 LLMs across three tasks — binary, graded, and pairwise — yielding 90 prompts in total. After filtering out unusable prompts from three humans and three LLMs, we employed the remaining 72 prompts with three different LLMs as judges to label document/query pairs from two TREC Deep Learning Datasets (2020 and 2021). We compare LLM-generated labels with TREC official human labels using Cohen's " + }, + { + "bbox": [ + 50, + 198, + 295, + 441 + ], + "type": "inline_equation", + "content": "\\kappa" + }, + { + "bbox": [ + 50, + 198, + 295, + 441 + ], + "type": "text", + "content": " and pairwise agreement measures. In addition to investigating the impact of prompt variations on agreement with human labels, we compare human- and LLM-generated prompts and analyze differences among different LLMs as judges. We also compare human- and LLM-generated prompts with the standard UMBRELA prompt used for relevance assessment by Bing and TREC 2024 Retrieval Augmented Generation (RAG) Track. To support future research in LLM-based evaluation, we release all data and prompts at https://github.com/Narabzad/prompt-sensitivity-relevance-judgements/." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 51, + 450, + 124, + 463 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 450, + 124, + 463 + ], + "spans": [ + { + "bbox": [ + 51, + 450, + 124, + 463 + ], + "type": "text", + "content": "CCS Concepts" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 50, + 465, + 295, + 487 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 465, + 295, + 487 + ], + "spans": [ + { + "bbox": [ + 50, + 465, + 295, + 487 + ], + "type": "text", + "content": "- Information systems " + }, + { + "bbox": [ + 50, + 465, + 295, + 487 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 50, + 465, + 295, + 487 + ], + "type": "text", + "content": " Evaluation of retrieval results; Relevance assessment; Test collections." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 51, + 498, + 104, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 498, + 104, + 510 + ], + "spans": [ + { + "bbox": [ + 51, + 498, + 104, + 510 + ], + "type": "text", + "content": "Keywords" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 51, + 512, + 267, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 512, + 267, + 523 + ], + "spans": [ + { + "bbox": [ + 51, + 512, + 267, + 523 + ], + "type": "text", + "content": "Large Language Models, Relevance Judgments, Evaluation" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 52, + 537, + 134, + 548 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 537, + 134, + 548 + ], + "spans": [ + { + "bbox": [ + 52, + 537, + 134, + 548 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 50, + 552, + 295, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 552, + 295, + 607 + ], + "spans": [ + { + "bbox": [ + 50, + 552, + 295, + 607 + ], + "type": "text", + "content": "Large Language Models (LLMs) are increasingly used for evaluation across various domains, including natural language processing and automated content assessment [1, 4, 9, 11, 28, 32]. The information retrieval (IR) community has been an early adopter of LLMs for relevance assessment [19, 24, 27, 35, 41]. 
Numerous studies have" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 50, + 617, + 295, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 617, + 295, + 675 + ], + "spans": [ + { + "bbox": [ + 50, + 617, + 295, + 675 + ], + "type": "text", + "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 52, + 675, + 117, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 675, + 117, + 684 + ], + "spans": [ + { + "bbox": [ + 52, + 675, + 117, + 684 + ], + "type": "text", + "content": "SIGIR '25, Padua, Italy" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 51, + 685, + 289, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 685, + 289, + 693 + ], + "spans": [ + { + "bbox": [ + 51, + 685, + 289, + 693 + ], + "type": "text", + "content": "© 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 52, + 693, + 162, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 693, + 162, + 700 + ], + "spans": [ + { + "bbox": [ + 52, + 693, + 162, + 700 + ], + "type": "text", + "content": "ACM ISBN 979-8-4007-1592-1/2025/07" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 52, + 700, + 167, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 700, + 167, + 709 + ], + "spans": [ + { + "bbox": [ + 52, + 700, + 167, + 709 + ], + "type": "text", + "content": "https://doi.org/10.1145/3726302.3730159" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 314, + 185, + 558, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 185, + 558, + 207 + ], + "spans": [ + { + "bbox": [ + 314, + 185, + 558, + 207 + ], + "type": "text", + "content": "confirmed that LLM-generated relevance labels closely align with human labels under multiple measures of agreement [26, 36, 37]." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 207, + 559, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 207, + 559, + 393 + ], + "spans": [ + { + "bbox": [ + 313, + 207, + 559, + 393 + ], + "type": "text", + "content": "Nonetheless, despite the widespread adoption of LLMs for relevance assessment, prompting strategies vary substantially across studies [2, 3, 20, 33]. An experiment reported at the LLM4Eval Workshop in SIGIR 2024 on Large Language Models for Evaluation in Information Retrieval [29], analyzed how different prompts influence agreement with human judgments and system rankings [28]. While multiple studies have examined how LLMs respond to different prompting strategies [5, 10, 23, 25, 34], these studies have generally been conducted with prompts tuned to specific LLMs and collections, or where prompt variants are constrained by templates [6]. 
As a complement to these studies, we report on a study of prompts from a variety of independent sources that have not been tuned to LLMs or collections, allowing us to examine the robustness of LLM-based relevance assessment under different prompting strategies. This investigation also allows us to compare different LLMs as judges to determine the degree to which different LLMs are sensitive to prompt modifications." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 313, + 393, + 559, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 393, + 559, + 525 + ], + "spans": [ + { + "bbox": [ + 313, + 393, + 559, + 525 + ], + "type": "text", + "content": "We collected and analyzed prompts generated by both human experts and LLMs themselves. We designed a guideline for prompting LLMs to perform relevance assessment following three different approaches: binary, graded, and pairwise. While most previous studies have focused on graded relevance, we believe it is crucial to explore a wider range of relevance assessment methods, as they have proven effective in assessing different scenarios in the evaluation of information-seeking systems [7, 8, 13-15, 21, 22, 31, 38-40]. As a benefit to employing LLMs for relevance assessment, it becomes easier to explore different approaches to relevance assessment since human judges do not need to be recruited and trained separately for each approach." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 313, + 525, + 559, + 646 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 525, + 559, + 646 + ], + "spans": [ + { + "bbox": [ + 313, + 525, + 559, + 646 + ], + "type": "text", + "content": "We recruited 15 human participants to create prompts for each of the three assessment approaches. As part of the recruitment process, we ensured that the participants were familiar with prompt engineering and relevance assessment principles, as detailed in Section 2. As a result of this inclusion criteria for recruitment, most participants were drawn from three academia NLP/IR labs. We also collected prompts from 15 different open source and commercial LLMs. Our primary goal is to understand prompt sensitivity in LLM-based relevance judgment [30], including its impact, robustness, and variation across different LLMs. Additionally, we explore the effectiveness of LLM as prompt generators." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 313, + 646, + 559, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 646, + 559, + 700 + ], + "spans": [ + { + "bbox": [ + 313, + 646, + 559, + 700 + ], + "type": "text", + "content": "We performed relevance judgment experiments using data from two years of the TREC Deep Learning Track: DL 2020 [16], and DL 2021 [17]. 
Using the prompts created by both human participants and LLMs, we conducted relevance assessments on query-document pairs from these datasets using two open-source LLMs - LLaMA" + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 216, + 36, + 558 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 216, + 36, + 558 + ], + "spans": [ + { + "bbox": [ + 14, + 216, + 36, + 558 + ], + "type": "text", + "content": "arXiv:2504.12408v1 [cs.IR] 16 Apr 2025" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 84, + 294, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 84, + 294, + 140 + ], + "spans": [ + { + "bbox": [ + 50, + 84, + 294, + 140 + ], + "type": "text", + "content": "3.2-3b and Mistral 7b - and one commercial LLM GPT-4o. Our experiment incorporates the three approaches to relevance assessment (binary, graded, and pairwise) with prompts from both humans and LLMs using three different LLMs as judges. Through our experiments, we address the following research questions:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 142, + 295, + 285 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 51, + 142, + 295, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 142, + 295, + 186 + ], + "spans": [ + { + "bbox": [ + 51, + 142, + 295, + 186 + ], + "type": "text", + "content": "- RQ1. Impact of Prompts on LLM-based Relevance Judgment Approaches: Given a clear task objective, how do different prompts influence the effectiveness of each approach to LLM-based relevance judgment?" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 186, + 295, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 186, + 295, + 219 + ], + "spans": [ + { + "bbox": [ + 51, + 186, + 295, + 219 + ], + "type": "text", + "content": "- RQ2. LLMs as Prompt Generators: How effective are LLM-generated prompts for relevance judgment, and how do they compare to human-crafted prompts?" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 220, + 294, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 220, + 294, + 252 + ], + "spans": [ + { + "bbox": [ + 51, + 220, + 294, + 252 + ], + "type": "text", + "content": "- RQ3. Prompt Robustness Across LLMs: Are there prompts that consistently perform well across different LLMs, regardless of the model used as a judge?" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 252, + 295, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 252, + 295, + 285 + ], + "spans": [ + { + "bbox": [ + 51, + 252, + 295, + 285 + ], + "type": "text", + "content": "- RQ4. Model-Specific Sensitivity to Prompts: Is prompt sensitivity consistent across all models, or do some LLMs show greater variability in performance?" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 50, + 287, + 295, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 287, + 295, + 343 + ], + "spans": [ + { + "bbox": [ + 50, + 287, + 295, + 343 + ], + "type": "text", + "content": "To ensure reproducibility, we have made all data and experimental artifacts publicly available at https://github.com/Narabzad/prompt-sensitivity-relevance-judgements/. 
The study reported in this paper, and its associated data release, has received ethics clearance as human subjects research from our institution." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 353, + 154, + 365 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 353, + 154, + 365 + ], + "spans": [ + { + "bbox": [ + 51, + 353, + 154, + 365 + ], + "type": "text", + "content": "2 Prompt Creation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 369, + 172, + 380 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 369, + 172, + 380 + ], + "spans": [ + { + "bbox": [ + 51, + 369, + 172, + 380 + ], + "type": "text", + "content": "2.1 Prompt generation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 383, + 295, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 383, + 295, + 536 + ], + "spans": [ + { + "bbox": [ + 50, + 383, + 295, + 536 + ], + "type": "text", + "content": "To investigate the impact of prompting on LLM-based relevance judgment, we collected data from both human participants and LLMs, ensuring that the task objective remained clear and consistent (sharing the same intent) across all participants. We prepared guidelines for prompt writing1, which provides detailed explanations of the three relevance judgment tasks: 1) Binary relevance — a passage is either relevant (1) or not relevant (0) to a query. 2) Graded relevance — a passage is rated on a 0-3 scale, where 3 indicates perfect relevance to the query. 3) Pairwise relevance — given two passages, chose the passage more relevant to the query. In the guideline, each task is illustrated with examples from the TREC Deep Learning 2019 [18], helping to ensure that both humans and LLMs had a well-defined understanding of the task. These examples could also be used as (few shot) examples if desired." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 536, + 295, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 536, + 295, + 624 + ], + "spans": [ + { + "bbox": [ + 50, + 536, + 295, + 624 + ], + "type": "text", + "content": "The guidelines specify a Python-based format, where participants (both human and LLMs) were required to fill in structured Python dictionaries. More specifically, participants had to provide both the \"system message\" and \"user message\" fields for the prompts, following the format commonly used in LLM-based prompting (e.g., OpenAI models and open-source alternatives such as those from Ollama). This structured approach ensures compatibility across different LLM implementations." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 50, + 624, + 295, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 624, + 295, + 690 + ], + "spans": [ + { + "bbox": [ + 50, + 624, + 295, + 690 + ], + "type": "text", + "content": "We recruited 15 human participants, each of whom had at least a Master's degree in computer science, were fluent in English, and had prior experience working with LLMs via API usage or coding. Additionally, these participants had previously published at least one paper in an IR-focused conference. Each participant received a $10 gift card as a token of appreciation for their time and effort." 
+ } + ] + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 318, + 105, + 566, + 137 + ], + "blocks": [ + { + "bbox": [ + 335, + 83, + 538, + 95 + ], + "lines": [ + { + "bbox": [ + 335, + 83, + 538, + 95 + ], + "spans": [ + { + "bbox": [ + 335, + 83, + 538, + 95 + ], + "type": "text", + "content": "Table 1: List of LLMs used for prompt generation." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 105, + 566, + 137 + ], + "lines": [ + { + "bbox": [ + 318, + 105, + 566, + 137 + ], + "spans": [ + { + "bbox": [ + 318, + 105, + 566, + 137 + ], + "type": "table", + "html": "
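For concreteness, the structured Python format the guideline asks for might look like the following minimal sketch; the key names, example wording, and the fill_prompt helper are our illustrative assumptions, not the authors' released template.

```python
# Minimal sketch of the structured prompt format described in the guidelines.
# Key names, wording, and the helper below are illustrative assumptions.

binary_prompt = {
    "system message": "You are an expert relevance assessor.",
    "user message": (
        "Query: {query}\n"
        "Passage: {passage}\n"
        "Answer 1 if the passage is relevant to the query, otherwise answer 0."
    ),
}

def fill_prompt(prompt: dict, query: str, passage: str) -> list[dict]:
    """Render the dictionary into the chat-message format used by
    OpenAI-style and Ollama-style APIs (system + user roles)."""
    return [
        {"role": "system", "content": prompt["system message"]},
        {"role": "user",
         "content": prompt["user message"].format(query=query, passage=passage)},
    ]
```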
GPT-4oGPT-4o MiniClaude 3.5LLaMA 3.2Phi-4
Mistral-largeDeepSeek-v3Amazon-Nova-Pro-v1Gemma-2-9bGrok-2
Gemini 2Jamba-1.5Athene-v2GPT-o1GPT-o1 Mini
", + "image_path": "5b718910606f79a68ac8219463a79dfb50a2541cf96d49063477e0e1ff84af54.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 185, + 559, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 185, + 559, + 307 + ], + "spans": [ + { + "bbox": [ + 313, + 185, + 559, + 307 + ], + "type": "text", + "content": "For prompt creation, we also used 15 different LLMs from the ChatBotArena² platform [12], which enables the execution of various LLMs online. We provided the same data collection guideline to the LMMs, including the task description and examples, ensuring that the LLMs received identical instructions to those given to human participants. Similar to human participants, each LLM was asked to complete the \"system message\" and \"user message\" fields in our Python function for relevance judgment. This setup allows us to systematically compare the impact of prompting across both groups. Table 1 provides the list of LLMs we used in this experiment for generating prompts for relevance judgments." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 323, + 452, + 336 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 323, + 452, + 336 + ], + "spans": [ + { + "bbox": [ + 314, + 323, + 452, + 336 + ], + "type": "text", + "content": "2.2 Filtering and cleaning" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 338, + 559, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 338, + 559, + 492 + ], + "spans": [ + { + "bbox": [ + 313, + 338, + 559, + 492 + ], + "type": "text", + "content": "To maintain consistency, we did not modify or provide additional instructions for any LLMs or human participants. Among the LLMs, two failed to complete the task because they deemed the task to be inappropriate, or repeatedly asked about examples. Among human participants, only one used a few-shot approach with examples. The rest did not provide any examples in their prompts. When testing the outputs of the collected prompts, not all of them were able to generate the expected format cleanly. Some prompts produced responses that required additional cleaning, such as verbose outputs like \"The passage is relevant, so the answer is: 1\" instead of simply returning 1. To ensure consistency, we examined the all generated output and applied necessary cleaning. After filtering and cleaning, we finalized 12 human-generated prompts and 12 LLM-generated prompts for use in our experiments." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 314, + 509, + 429, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 509, + 429, + 521 + ], + "spans": [ + { + "bbox": [ + 314, + 509, + 429, + 521 + ], + "type": "text", + "content": "2.3 Prompt Diversity" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 523, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 523, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 523, + 559, + 710 + ], + "type": "text", + "content": "To better understand the variation in prompts, we examined the diversity of both human-generated and LLM-generated prompts. Specifically, we analyzed both user prompts and system prompts separately, as they serve distinct roles in guiding the LLM's response. In a prompt the user message provides the direct instructions given to the model, specifying what information is needed. 
In contrast, the system message provides context for the task, defining the LLM's role and expected behavior (e.g., \"You are an expert relevance judgment assessor\"). Figure 1 illustrates the distribution of unique terms used across all human-generated (in green) and LLM-generated (in red) prompts. As shown in this figure, human-generated prompts exhibit greater diversity in wording when compared to LLM-generated ones. This suggests that humans introduce more nuanced descriptions and varied phrasing when defining the task, while LLM-generated system prompts tend to rely on more standardized language. Additionally, system messages exhibit greater lexical diversity compared to user messages." + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 168, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 168, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 168, + 68 + ], + "type": "text", + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 436, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 436, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 436, + 60, + 559, + 69 + ], + "type": "text", + "content": "Negar Arabzadeh and Charles L.A. Clarke" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 700, + 122, + 709 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 700, + 122, + 709 + ], + "spans": [ + { + "bbox": [ + 52, + 700, + 122, + 709 + ], + "type": "text", + "content": "1https://bit.ly/4hP0EMg" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 75, + 87, + 266, + 178 + ], + "blocks": [ + { + "bbox": [ + 75, + 87, + 266, + 178 + ], + "lines": [ + { + "bbox": [ + 75, + 87, + 266, + 178 + ], + "spans": [ + { + "bbox": [ + 75, + 87, + 266, + 178 + ], + "type": "image", + "image_path": "1fc35f1fb4afa7dc61ae8b75f2e5a0925dc916b91e1fc9063dd02801e0432c0d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 52, + 193, + 296, + 216 + ], + "lines": [ + { + "bbox": [ + 52, + 193, + 296, + 216 + ], + "spans": [ + { + "bbox": [ + 52, + 193, + 296, + 216 + ], + "type": "text", + "content": "Figure 1: Diversity of words across human and LLM-generated prompts." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 236, + 205, + 248 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 236, + 205, + 248 + ], + "spans": [ + { + "bbox": [ + 52, + 236, + 205, + 248 + ], + "type": "text", + "content": "3 Experimental Methodology" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 251, + 294, + 457 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 251, + 294, + 457 + ], + "spans": [ + { + "bbox": [ + 53, + 251, + 294, + 457 + ], + "type": "text", + "content": "Data We utilize the TREC Deep Learning Track datasets from 2020 and 2021. The DL-20 dataset contains 54 judged queries with 11,386 relevance assessments from MS MARCO V1 collection, while the DL-21 dataset includes 53 judged queries and 10,828 assessments from MS MARCO V2. Both datasets have been manually annotated by NIST assessors following the TREC relevance judgment guidelines. 
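The unique-term comparison behind Figure 1 can be sketched as below; the tokenization choice is ours (the paper does not specify one), and the prompt lists are toy stand-ins for the collected prompts.

```python
# Sketch of the unique-term diversity comparison; whitespace tokenization
# is our assumption, and the two prompt lists are toy examples.
def unique_terms(prompts: list[str]) -> set[str]:
    """Lowercased whitespace tokens appearing across a set of prompts."""
    return {tok for p in prompts for tok in p.lower().split()}

human_prompts = ["Decide if the passage answers the query; reply 1 or 0."]
llm_prompts = ["You are a relevance assessor. Output 1 if relevant, else 0."]
print(len(unique_terms(human_prompts)), len(unique_terms(llm_prompts)))
```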
The assessors evaluate each document-query pair based on a graded relevance scale, ranging from not relevant (0) to highly relevant (3). The assessment process involves pooling top-ranked documents from multiple retrieval systems, which are then judged by human annotators. Using this data allows us to compare the three different variations of LLM-based judgments, i.e., binary, graded, and pairwise. For graded relevance, we compare against the actual graded labels. For binary judgments, following prior work [19, 37], we classify levels 2 and 3 as relevant and levels 0 and 1 as non-relevant. For pairwise judgments, we compare documents with different relevance levels, assuming that a document with a higher relevance level should be ranked as more relevant than one with a lower relevance level."
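A short sketch of these label mappings follows; the function names and the "a"/"b" pair encoding are our own, not code released with the paper.

```python
# Minimal sketch of the label mappings described above (our naming).
def to_binary(graded_label: int) -> int:
    """Collapse a 0-3 TREC label: levels 2 and 3 count as relevant."""
    return 1 if graded_label >= 2 else 0

def pairwise_gold(label_a: int, label_b: int) -> str | None:
    """Expected preference: the higher-graded document should be preferred.
    Equal grades give no gold pair and are not used as pairwise test cases."""
    if label_a == label_b:
        return None
    return "a" if label_a > label_b else "b"
```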
Modelcrafted byBinaryGradedPairwise
MeanVarianceMeanVarianceMeanVariance
GPT-4oLLM0.4340.0030.2150.0010.8490.000
Human0.2700.0980.2150.0010.5780.139
LLaMA 3.2LLM0.3030.0100.0330.0020.4390.066
Human0.1670.0410.1020.0030.3300.073
MistralLLM0.4050.0010.0080.0040.5740.014
Human0.2430.0510.0040.0050.4420.073
", + "image_path": "14ed2005b76ba13d7e932f558d55c0b027ef2666ca965213ba06bad4416456ce.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 317, + 234, + 558, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 234, + 558, + 376 + ], + "spans": [ + { + "bbox": [ + 317, + 234, + 558, + 376 + ], + "type": "text", + "content": "From these three categories, we constructed document pairs for pairwise judgments. Specifically, we sampled 10 pairs per query from each of the following comparisons: \"highly relevant vs. non-relevant\", \"relevant vs. non-relevant\", and \"highly relevant vs. relevant\" (up to 30 pairs in total). If fewer than 10 pairs were available for a given comparison, we included as many as possible. Additionally, for the pairwise setting, we minimized positional bias by evaluating each document pair twice, swapping the order of the documents in the second run. The result is counted as \"agree\" if the LLM favors the more relevant passage in both comparisons, \"tie\" if the LLM's decisions are inconsistent when the passage order is swapped, and \"disagree\" if the LLM consistently selects the passage with a lower relevance level assigned by human annotators." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 317, + 388, + 436, + 401 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 388, + 436, + 401 + ], + "spans": [ + { + "bbox": [ + 317, + 388, + 436, + 401 + ], + "type": "text", + "content": "4 Results and Findings" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 317, + 403, + 558, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 403, + 558, + 588 + ], + "spans": [ + { + "bbox": [ + 317, + 403, + 558, + 588 + ], + "type": "text", + "content": "In order to explore the research questions raised in the introduction, we investigated the agreement of LLM-based relevance judgments from different prompts with human annotations on TREC 2020 and 2021 using three different LLMs, as shown in Figure 2. For binary and graded relevance judgments, agreement is measured using Cohen's Kappa " + }, + { + "bbox": [ + 317, + 403, + 558, + 588 + ], + "type": "inline_equation", + "content": "(\\kappa)" + }, + { + "bbox": [ + 317, + 403, + 558, + 588 + ], + "type": "text", + "content": ". For pairwise judgments, since the task involves assessing agreement with the actual ranking of pairs, we report the percentage of cases where the LLM's preference agrees with the expected order. In this figure, the leftmost two columns represent the results for binary, the middle two columns correspond to graded, and the rightmost two columns display the results from pairwise relevance judgment. The green, blue, and red bars indicate agreement for GPT-4o, LLAMA 3.2, and Mistral, respectively. In each pair of plots, the left plot presents results for DL-20, while the right plot corresponds to DL-21. The bottom 12 bars represent prompts crafted by LLMs; on top of them there are 12 bars corresponding to prompts created by humans." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 317, + 590, + 558, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 590, + 558, + 708 + ], + "spans": [ + { + "bbox": [ + 317, + 590, + 558, + 708 + ], + "type": "text", + "content": "In addition to results from the human- and LLM-written prompts, we also report the results of UMBRELA assessments at the top of the graded relevance sub-figure (middle). UMBRELA is an open-source reproduction of Microsoft's Bing LLM-based relevance assessor [35], designed to automate relevance judgments effectively [36, 37]. It follows a structured prompting approach and has demonstrated high correlation with both human annotations and system rankings across multiple TREC Deep Learning Tracks (2019-2023). Notably, UMBRELA has been integrated into TREC 2024 RAG for automated evaluation, which further validated its reliability as an alternative to human assessors. We consider UMBRELA a reliable and effective" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 319, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 319, + 69 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 319, + 69 + ], + "type": "text", + "content": "A Human-AI Comparative Analysis of Prompt Sensitivity in LLM-Based Relevance Judgment" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 443, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 443, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 443, + 60, + 558, + 69 + ], + "type": "text", + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 700, + 109, + 709 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 700, + 109, + 709 + ], + "spans": [ + { + "bbox": [ + 52, + 700, + 109, + 709 + ], + "type": "text", + "content": "2https://Imarena.ai/" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 62, + 83, + 550, + 434 + ], + "blocks": [ + { + "bbox": [ + 62, + 83, + 550, + 434 + ], + "lines": [ + { + "bbox": [ + 62, + 83, + 550, + 434 + ], + "spans": [ + { + "bbox": [ + 62, + 83, + 550, + 434 + ], + "type": "image", + "image_path": "6a0acc0eba950fda3479939265810549aec4da0f6fa29aaac59d68a2f27cbb35.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 49, + 445, + 558, + 491 + ], + "lines": [ + { + "bbox": [ + 49, + 445, + 558, + 491 + ], + "spans": [ + { + "bbox": [ + 49, + 445, + 558, + 491 + ], + "type": "text", + "content": "Figure 2: Agreement of LLM-based relevance judgments with human annotations across different prompts and relevance judgment tasks. UMBRELA represents the reproduction of Bing's LLM assessor introduced in [37]. Otherwise, the top 12 bars " + }, + { + "bbox": [ + 49, + 445, + 558, + 491 + ], + "type": "inline_equation", + "content": "(\\mathbf{H}^{*})" + }, + { + "bbox": [ + 49, + 445, + 558, + 491 + ], + "type": "text", + "content": " represent human-crafted prompts, while the bottom 12 correspond to LLM-generated prompts. The dashed lines show the mean of agreement in LLM -crafted prompts and human-crafted prompts separately." 
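The two agreement measures used here can be sketched as follows; this is our illustrative implementation of Cohen's kappa and of the order-swap tally for pairwise judgments, not the authors' code, and it assumes picks have already been normalized to document ids.

```python
# Illustrative implementations of the agreement measures described above.
# Kappa is undefined when chance agreement p_e equals 1 (degenerate labels).
from collections import Counter

def cohens_kappa(labels_a: list[int], labels_b: list[int]) -> float:
    """Cohen's kappa between LLM labels and human labels (binary or graded)."""
    n = len(labels_a)
    p_o = sum(a == b for a, b in zip(labels_a, labels_b)) / n   # observed agreement
    ca, cb = Counter(labels_a), Counter(labels_b)
    p_e = sum(ca[k] * cb[k] for k in ca) / n ** 2               # chance agreement
    return (p_o - p_e) / (1 - p_e)

def pairwise_outcome(first_pick: str, swapped_pick: str, gold: str) -> str:
    """Each pair is judged twice, swapping passage order on the second run;
    a consistent judge picks the same document id both times."""
    if first_pick == gold and swapped_pick == gold:
        return "agree"
    if first_pick != gold and swapped_pick != gold:
        return "disagree"
    return "tie"
```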
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 506, + 295, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 506, + 295, + 561 + ], + "spans": [ + { + "bbox": [ + 50, + 506, + 295, + 561 + ], + "type": "text", + "content": "prompt and we believe comparing its performance against human-crafted and LLM-generated prompts in graded relevance judgments would bring interesting insights. Additionally, Table 2 summarizes Figure 2 by providing the mean and variance of agreement scores across the two datasets and different relevance judgments." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 561, + 295, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 561, + 295, + 583 + ], + "spans": [ + { + "bbox": [ + 50, + 561, + 295, + 583 + ], + "type": "text", + "content": "We now consider investigating each of our research questions in light of these agreement results." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 583, + 295, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 583, + 295, + 704 + ], + "spans": [ + { + "bbox": [ + 50, + 583, + 295, + 704 + ], + "type": "text", + "content": "RQ1. Impact of Prompts on LLM-based Relevance Judgment Approaches: Figure 2 and Table 2 reveal significant variance across different LLM-based relevance judgment approaches. Binary and pairwise methods exhibit the least sensitivity to input prompts, maintaining more consistent agreement. In contrast, graded relevance judgments are highly sensitive to prompt variations. We note that while binary and pairwise methods operate with only two choices, graded relevance introduces greater variability. Particularly on graded judgments, GPT-40 demonstrates relatively stable performance but LLaMA 3.2 and Mistral show considerable fluctuations across different prompts." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 506, + 559, + 693 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 313, + 506, + 559, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 506, + 559, + 605 + ], + "spans": [ + { + "bbox": [ + 313, + 506, + 559, + 605 + ], + "type": "text", + "content": "RQ2. LLMs as Prompt Generators: Table 2 shows that LLM-generated prompts generally yield higher average agreement with human annotations. However, for graded relevance judgments, the difference is minimal. This may be due to (i) participants' greater familiarity with graded assessments or (ii) the inherently subjective nature of assigning relevance levels, which may require more calibration with human annotators. Additionally, LLM-generated prompts exhibit lower variance in agreement compared to human-crafted prompts, indicating less sensitivity to prompt variations." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 605, + 559, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 605, + 559, + 693 + ], + "spans": [ + { + "bbox": [ + 313, + 605, + 559, + 693 + ], + "type": "text", + "content": "RQ3. Prompt Robustness Across LLMs: Figure 3 analyzes inter-agreement rates among different prompt groups using Krippendorff's alpha. Here we measure agreement between different prompt's output, regardless of their alignment with human judgments. 
The results show that LLM-generated prompts exhibit higher inter-agreement than human-crafted ones, likely due to the greater linguistic diversity in human-generated prompts, as seen in Figure 1. This suggests that LLM-generated prompts are more robust" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 168, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 168, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 168, + 68 + ], + "type": "text", + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 436, + 60, + 559, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 436, + 60, + 559, + 68 + ], + "spans": [ + { + "bbox": [ + 436, + 60, + 559, + 68 + ], + "type": "text", + "content": "Negar Arabzadeh and Charles L.A. Clarke" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 75, + 85, + 266, + 199 + ], + "blocks": [ + { + "bbox": [ + 75, + 85, + 266, + 199 + ], + "lines": [ + { + "bbox": [ + 75, + 85, + 266, + 199 + ], + "spans": [ + { + "bbox": [ + 75, + 85, + 266, + 199 + ], + "type": "image", + "image_path": "32d199718463392947d076c7e80ed01b8a647bf3a5e940ced5752b21d5a01dc2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 211, + 295, + 235 + ], + "lines": [ + { + "bbox": [ + 50, + 211, + 295, + 235 + ], + "spans": [ + { + "bbox": [ + 50, + 211, + 295, + 235 + ], + "type": "text", + "content": "Figure 3: Krippendorff's inter-agreement rate between all the prompts on two datasets." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 251, + 295, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 251, + 295, + 392 + ], + "spans": [ + { + "bbox": [ + 50, + 251, + 295, + 392 + ], + "type": "text", + "content": "than human-crafted ones. While some human-crafted prompts performed well across all models, prompt effectiveness varies significantly between LLMs, with no single prompt consistently excelling across all models. However, for graded assessments, UMBRELA consistently demonstrated high performance across different LLMs and it emerged as one of the most effective prompts across all models. UMBRELA had previously shown strong correlation with human judgments on TREC DL tracks [37]. We hypothesize that UMBRELA's strong and consistent performance may stem from how its prompt deconstructs the concept of relevance into finer-grained aspects, such as trustworthiness and alignment with intent. This structured approach likely prevents the LLM from relying on its own interpretation of relevance." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 393, + 295, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 393, + 295, + 482 + ], + "spans": [ + { + "bbox": [ + 50, + 393, + 295, + 482 + ], + "type": "text", + "content": "RQ4. Model-Specific Sensitivity to Prompts: From Figure 2, we observe that GPT-4o demonstrates high consistency across most prompts and all relevance assessment approaches. In contrast, the performance of LLaMA 3.2 and Mistral varies significantly depending on the prompt and assessment method. This variability is further confirmed by the variance of agreement reported in Table 2. 
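For reference, the inter-agreement analysis can be reproduced along these lines; this is a hand-rolled Krippendorff's alpha for nominal labels under our assumed data layout (per item, the label each prompt produced), not the authors' implementation.

```python
# Hand-rolled Krippendorff's alpha (nominal data); requires at least two
# distinct label values overall. Data layout is our assumption.
from collections import Counter
from itertools import permutations

def krippendorff_alpha_nominal(units: list[list[int]]) -> float:
    """alpha = 1 - D_o / D_e over the coincidence matrix of label pairs."""
    coincidence = Counter()                  # ordered value pairs within a unit
    for labels in units:
        m = len(labels)
        if m < 2:
            continue                         # items with <2 labels are skipped
        for a, b in permutations(labels, 2):
            coincidence[(a, b)] += 1.0 / (m - 1)
    totals = Counter()                       # marginal frequency of each value
    for (a, _), w in coincidence.items():
        totals[a] += w
    n = sum(totals.values())
    d_o = sum(w for (a, b), w in coincidence.items() if a != b)
    d_e = sum(totals[a] * totals[b]
              for a in totals for b in totals if a != b) / (n - 1)
    return 1.0 - d_o / d_e
```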
Notably, GPT-4o exhibits consistently low variance in agreement, particularly when prompted with LLM-crafted prompts." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 491, + 209, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 491, + 209, + 502 + ], + "spans": [ + { + "bbox": [ + 51, + 491, + 209, + 502 + ], + "type": "text", + "content": "5 Conclusion and Limitations" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 505, + 295, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 505, + 295, + 637 + ], + "spans": [ + { + "bbox": [ + 50, + 505, + 295, + 637 + ], + "type": "text", + "content": "In this study, we investigated the sensitivity of LLM-based relevance judgments to different prompting strategies across multiple models. We examined how prompts, whether human- or LLM-generated, influence judgment effectiveness, their robustness across different LLMs, and the extent to which models exhibit variability in response to prompt modifications. One specific outcome is to confirm the performance of UMBRELA as a leading prompt for LLM-based graded relevance assessment. Despite these contributions, our study has limitations. Our human participants primarily had a computer science background with experience writing prompts for LLMs. Additionally, we evaluated only three LLMs as judges, limiting the generalizability of our findings." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 52, + 646, + 108, + 656 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 646, + 108, + 656 + ], + "spans": [ + { + "bbox": [ + 52, + 646, + 108, + 656 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 658, + 295, + 708 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 55, + 658, + 295, + 684 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 658, + 295, + 684 + ], + "spans": [ + { + "bbox": [ + 55, + 658, + 295, + 684 + ], + "type": "text", + "content": "[1] Marwah Alaofi, Negar Arabzadeh, Charles LA Clarke, and Mark Sanderson. 2024. Generative information retrieval evaluation. In Information Access in the Era of Generative AI. Springer, 135-159." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 684, + 294, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 684, + 294, + 708 + ], + "spans": [ + { + "bbox": [ + 55, + 684, + 294, + 708 + ], + "type": "text", + "content": "[2] Neger Arabzadeh, Amin Bigdeli, and Charles L. A. Clarke. 2024. Adapting Standard Retrieval Benchmarks to Evaluate Generated Answers. In 46th European Conference on Information Retrieval. Glasgow, Scotland." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 317, + 86, + 559, + 709 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 320, + 86, + 559, + 102 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 86, + 559, + 102 + ], + "spans": [ + { + "bbox": [ + 320, + 86, + 559, + 102 + ], + "type": "text", + "content": "[3] Negar Arabzadeh and Charles LA Clarke. 2024. A Comparison of Methods for Evaluating Generative IR. arXiv preprint arXiv:2404.04044 (2024)." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 320, + 103, + 559, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 103, + 559, + 159 + ], + "spans": [ + { + "bbox": [ + 320, + 103, + 559, + 159 + ], + "type": "text", + "content": "[4] Negar Arabzadeh, Siqing Huo, Nikhil Mehta, Qingyun Wu, Chi Wang, Ahmed Hassan Awadallah, Charles L. A. Clarke, and Julia Kiseleva. 2024. Assessing and Verifying Task Utility in LLM-Powered Applications. In Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (Eds.). Association for Computational Linguistics, Miami, Florida, USA, 21868-21888. doi:10.18653/v1/2024.emnlp-main.1219" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 320, + 159, + 559, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 159, + 559, + 191 + ], + "spans": [ + { + "bbox": [ + 320, + 159, + 559, + 191 + ], + "type": "text", + "content": "[5] Simran Arora, Avanika Narayan, Mayee F. Chen, Laurel Orr, Neel Guha, Kush Bhatia, Ines Chami, Frederic Sala, and Christopher Re. 2022. Ask Me Anything: A simple strategy for prompting language models. arXiv:2210.02441 [cs.CL] https://arxiv.org/abs/2210.02441" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 320, + 191, + 559, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 191, + 559, + 222 + ], + "spans": [ + { + "bbox": [ + 320, + 191, + 559, + 222 + ], + "type": "text", + "content": "[6] Leif Azzopardi, Charles LA Clarke, Paul Kantor, Bhaskar Mitra, Johanne R Trippas, Zhaochun Ren, Mohammad Aliennejadi, Negar Arabzadeh, Raman Chandrasekar, Maarten de Rijke, et al. 2024. Report on The Search Futures Workshop at ECIR 2024. In ACM SIGIR Forum, Vol. 58. ACM New York, NY, USA, 1-41." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 320, + 222, + 559, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 222, + 559, + 247 + ], + "spans": [ + { + "bbox": [ + 320, + 222, + 559, + 247 + ], + "type": "text", + "content": "[7] Chris Buckley and Ellen M Voorhees. 2004. Retrieval evaluation with incomplete information. In Proceedings of the 27th annual international ACM SIGIR conference on Research and development in information retrieval. 25-32." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 320, + 247, + 559, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 247, + 559, + 270 + ], + "spans": [ + { + "bbox": [ + 320, + 247, + 559, + 270 + ], + "type": "text", + "content": "[8] Ben Carterette, Paul N. Bennett, David Maxwell Chickering, and Susan T. Dumais. 2008. Here or there: Preference judgments for Relevance. Computer Science Department Faculty Publication Series 46. University of Massachusetts Amherst." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 320, + 270, + 559, + 302 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 270, + 559, + 302 + ], + "spans": [ + { + "bbox": [ + 320, + 270, + 559, + 302 + ], + "type": "text", + "content": "[9] Yupeng Chang, Xu Wang, Jindong Wang, Yuan Wu, Linyi Yang, Kaijie Zhu, Hao Chen, Xiaoyuan Yi, Cunxiang Wang, Yidong Wang, et al. 2024. A survey on evaluation of large language models. ACM Transactions on Intelligent Systems and Technology 15, 3 (2024), 1-45." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 317, + 302, + 559, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 302, + 559, + 326 + ], + "spans": [ + { + "bbox": [ + 317, + 302, + 559, + 326 + ], + "type": "text", + "content": "[10] Anowoy Chatterjee, HSVNS Kowndinya Renduchintala, Sumit Bhatia, and Tanmoy Chakraborty. 2024. POSIX: A Prompt Sensitivity Index For Large Language Models. arXiv preprint arXiv:2410.02185 (2024)." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 326, + 559, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 326, + 559, + 342 + ], + "spans": [ + { + "bbox": [ + 317, + 326, + 559, + 342 + ], + "type": "text", + "content": "[11] Cheng-Han Chiang and Hung-yi Lee. 2023. Can large language models be an alternative to human evaluations? arXiv preprint arXiv:2305.01937 (2023)." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 342, + 559, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 342, + 559, + 373 + ], + "spans": [ + { + "bbox": [ + 317, + 342, + 559, + 373 + ], + "type": "text", + "content": "[12] Wei-Lin Chiang, Lianmin Zheng, Ying Sheng, Anastasios Nikolas Angelopoulos, Tianle Li, Dacheng Li, Hao Zhang, Banghua Zhu, Michael Jordan, Joseph E Gonzalez, et al. 2024. Chatbot arena: An open platform for evaluating llms by human preference. arXiv preprint arXiv:2403.04132 (2024)." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 373, + 559, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 373, + 559, + 397 + ], + "spans": [ + { + "bbox": [ + 317, + 373, + 559, + 397 + ], + "type": "text", + "content": "[13] Charles L. A. Clarke, Alexandra Vtyurina, and Mark D. Smucker. 2021. Assessing Top-" + }, + { + "bbox": [ + 317, + 373, + 559, + 397 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 317, + 373, + 559, + 397 + ], + "type": "text", + "content": " Preferences. ACM Trans. Inf. Syst. 39, 3, Article 33 (may 2021), 21 pages. doi:10.1145/3451161" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 398, + 559, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 398, + 559, + 414 + ], + "spans": [ + { + "bbox": [ + 317, + 398, + 559, + 414 + ], + "type": "text", + "content": "[14] Charles L. A. Clarke, Alexandra Vtyurina, and Mark D. Smucker. 2021. Assessing top-" + }, + { + "bbox": [ + 317, + 398, + 559, + 414 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 317, + 398, + 559, + 414 + ], + "type": "text", + "content": " preferences. ACM Transactions on Information Systems 39, 3 (July 2021)." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 414, + 559, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 414, + 559, + 437 + ], + "spans": [ + { + "bbox": [ + 317, + 414, + 559, + 437 + ], + "type": "text", + "content": "[15] Cyril W Cleverdon. 1991. The significance of the Cranfield tests on index languages. In Proceedings of the 14th annual international ACM SIGIR conference on Research and development in information retrieval. 3-12." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 437, + 559, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 437, + 559, + 460 + ], + "spans": [ + { + "bbox": [ + 317, + 437, + 559, + 460 + ], + "type": "text", + "content": "[16] Nick Craswell, Bhaskar Mitra, Emine Yilmaz, and Daniel Campos. 2021. Overview of the TREC 2020 deep learning track. arXiv:2102.07662 [cs.IR] https://arxiv.org/abs/2102.07662" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 461, + 559, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 461, + 559, + 493 + ], + "spans": [ + { + "bbox": [ + 317, + 461, + 559, + 493 + ], + "type": "text", + "content": "[17] Nick Craswell, Bhaskar Mitra, Emine Yilmaz, Daniel Campos, and Jimmy Lin. 2022. Overview of the TREC 2021 deep learning track. In Text REtrieval Conference (TREC). NIST, TREC. https://www.microsoft.com/en-us/research/publication/overview-of-the-trec-2021-deep-learning-track/" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 493, + 559, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 493, + 559, + 517 + ], + "spans": [ + { + "bbox": [ + 317, + 493, + 559, + 517 + ], + "type": "text", + "content": "[18] Nick Craswell, Bhaskar Mitra, Emine Yilmaz, Daniel Campos, and Ellen M Voorhees. 2020. Overview of the TREC 2019 deep learning track. arXiv preprint arXiv:2003.07820 (2020)." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 517, + 559, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 517, + 559, + 556 + ], + "spans": [ + { + "bbox": [ + 317, + 517, + 559, + 556 + ], + "type": "text", + "content": "[19] Gugliemo Faggioli, Laura Dietz, Charles LA Clarke, Gianluca Demartini, Matthias Hagen, Claudia Hauff, Noriko Kando, Evangelos Kanoulas, Martin Potthast, Benno Stein, et al. 2023. Perspectives on large language models for relevance judgment. In Proceedings of the 2023 ACM SIGIR International Conference on Theory of Information Retrieval. 39-50." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 317, + 556, + 559, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 556, + 559, + 581 + ], + "spans": [ + { + "bbox": [ + 317, + 556, + 559, + 581 + ], + "type": "text", + "content": "[20] Naghmeh Farzi and Laura Dietz. 2024. Pencils down! automatic rubric-based evaluation of retrieve/generate systems. In Proceedings of the 2024 ACM SIGIR International Conference on Theory of Information Retrieval. 175-184." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 317, + 581, + 559, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 581, + 559, + 597 + ], + "spans": [ + { + "bbox": [ + 317, + 581, + 559, + 597 + ], + "type": "text", + "content": "[21] David Hawking, Ellen Voorhees, Nick Craswell, Peter Bailey, et al. 1999. Overview of the trec-8 web track. In TREC." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 317, + 597, + 559, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 597, + 559, + 628 + ], + "spans": [ + { + "bbox": [ + 317, + 597, + 559, + 628 + ], + "type": "text", + "content": "[22] Gabriella Kazai, Emine Yilmaz, Nick Craswell, and S.M.M. Tahaghoghi. 2013. User Intent and Assessor Disagreement in Web Search Evaluation. In 22nd ACM International Conference on Information and Knowledge Management. San Francisco, California, 699-708." 
+ } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 317, + 628, + 559, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 628, + 559, + 653 + ], + "spans": [ + { + "bbox": [ + 317, + 628, + 559, + 653 + ], + "type": "text", + "content": "[23] Alina Leidinger, Robert van Rooij, and Ekaterina Shutova. 2023. The language of prompting: What linguistic properties make a prompt successful? arXiv:2311.01967 [cs.CL] https://arxiv.org/abs/2311.01967" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 317, + 653, + 559, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 653, + 559, + 685 + ], + "spans": [ + { + "bbox": [ + 317, + 653, + 559, + 685 + ], + "type": "text", + "content": "[24] Dawei Li, Bohan Jiang, Liangjie Huang, Alimohammad Beigi, Chengshuai Zhao, Zhen Tan, Amrita Bhattacharjee, Yuxuan Jiang, Canyu Chen, Tianhao Wu, et al. 2024. From Generation to Judgment: Opportunities and Challenges of LLM-as-a-judge. arXiv preprint arXiv:2411.16594 (2024)." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 317, + 685, + 559, + 709 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 685, + 559, + 709 + ], + "spans": [ + { + "bbox": [ + 317, + 685, + 559, + 709 + ], + "type": "text", + "content": "[25] Sheng Lu, Hendrik Schuff, and Iryna Gurevych. 2024. How are Prompts Different in Terms of Sensitivity? In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language" + } + ] + } + ], + "index": 34 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 320, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 320, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 320, + 69 + ], + "type": "text", + "content": "A Human-AI Comparative Analysis of Prompt Sensitivity in LLM-Based Relevance Judgment" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 443, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 443, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 443, + 60, + 559, + 69 + ], + "type": "text", + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 86, + 295, + 334 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 66, + 86, + 294, + 110 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 86, + 294, + 110 + ], + "spans": [ + { + "bbox": [ + 66, + 86, + 294, + 110 + ], + "type": "text", + "content": "Technologies (Volume 1: Long Papers), Kevin Duh, Helena Gomez, and Steven Bethard (Eds.). Association for Computational Linguistics, Mexico City, Mexico, 5833-5856. doi:10.18653/v1/2024.nacl-long.325" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 52, + 110, + 295, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 110, + 295, + 135 + ], + "spans": [ + { + "bbox": [ + 52, + 110, + 295, + 135 + ], + "type": "text", + "content": "[26] Sean MacAvaney and Luca Soldaini. 2023. One-shot labeling for automatic relevance estimation. In Proceedings of the 46th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2230-2235." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 135, + 294, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 135, + 294, + 167 + ], + "spans": [ + { + "bbox": [ + 53, + 135, + 294, + 167 + ], + "type": "text", + "content": "[27] Chuan Meng, Negar Arabzadeh, Arian Askari, Mohammad Aliannejadi, and Maarten de Rijke. 2024. Query Performance Prediction using Relevance Judgments Generated by Large Language Models. arXiv preprint arXiv:2404.01012 (2024)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 167, + 294, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 167, + 294, + 206 + ], + "spans": [ + { + "bbox": [ + 53, + 167, + 294, + 206 + ], + "type": "text", + "content": "[28] Hossein A Rahmani, Clemencia Siro, Mohammad Aliannejadi, Nick Craswell, Charles LA Clarke, Guglielmo Faggioli, Bhaskar Mitra, Paul Thomas, and Emine Yilmaz. 2024. Llm4eval: Large language model for evaluation in ir. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 3040-3043." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 206, + 294, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 206, + 294, + 247 + ], + "spans": [ + { + "bbox": [ + 53, + 206, + 294, + 247 + ], + "type": "text", + "content": "[29] Hossein A. Rahmani, Clemencia Siro, Mohammad Aliannejadi, Nick Craswell, Charles L. A. Clarke, Guglielmo Faggioli, Bhaskar Mitra, Paul Thomas, and Emine Yilmaz. 2024. Report on the 1st Workshop on Large Language Model for Evaluation in Information Retrieval (LLM4Eval 2024) at SIGIR 2024. arXiv:2408.05388 [cs.IR] https://arxiv.org/abs/2408.05388" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 247, + 294, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 247, + 294, + 270 + ], + "spans": [ + { + "bbox": [ + 53, + 247, + 294, + 270 + ], + "type": "text", + "content": "[30] Amirhossein Razavi, Mina Soltangheis, Neger Arabzadeh, Sara Salamat, Morteza Zihayat, and Ebrahim Bagheri. 2025. Benchmarking Prompt Sensitivity in Large Language Models. arXiv preprint arXiv:2502.06065 (2025)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 270, + 294, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 270, + 294, + 294 + ], + "spans": [ + { + "bbox": [ + 53, + 270, + 294, + 294 + ], + "type": "text", + "content": "[31] Tetsuya Sakai and Zhaohao Zeng. 2020. Good evaluation measures based on document preferences. In 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. 359-368." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 294, + 294, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 294, + 294, + 318 + ], + "spans": [ + { + "bbox": [ + 53, + 294, + 294, + 318 + ], + "type": "text", + "content": "[32] Alireza Salemi and Hamed Zamani. 2024. Evaluating retrieval quality in retrieval-augmented generation. In Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2395-2400." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 318, + 294, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 318, + 294, + 334 + ], + "spans": [ + { + "bbox": [ + 53, + 318, + 294, + 334 + ], + "type": "text", + "content": "[33] David P Sander and Laura Dietz. 2021. 
EXAM: How to Evaluate Retrieve-and-Generate Systems for Users Who Do Not (Yet) Know What They Want. In"
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 255, + 559, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 255, + 559, + 286 + ], + "spans": [ + { + "bbox": [ + 316, + 255, + 559, + 286 + ], + "type": "text", + "content": "[40] Xinyi Yan, Chengxi Luo, Charles L. A. Clarke, Nick Craswell, Ellen M. Voorhees, and Pablo Castells. 2022. Human Preferences as Dueling Bandits. In Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR '22). ACM. doi:10.1145/3477495.3531991" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 286, + 559, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 286, + 559, + 319 + ], + "spans": [ + { + "bbox": [ + 316, + 286, + 559, + 319 + ], + "type": "text", + "content": "[41] Honglei Zhuang, Zhen Qin, Kai Hui, Junru Wu, Le Yan, Xuanhui Wang, and Michael Berdersky. 2023. Beyond Yes and No: Improving Zero-Shot LLM Rankers via Scoring Fine-Grained Relevance Labels. arXiv preprint arXiv:2310.14122 (2023)." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 168, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 168, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 168, + 68 + ], + "type": "text", + "content": "SIGIR '25, July 13-18, 2025, Padua, Italy" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 436, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 436, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 436, + 60, + 559, + 69 + ], + "type": "text", + "content": "Negar Arabzadeh and Charles L.A. Clarke" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12459/9956eeb3-00c8-414d-bf51-c94c41a6c788_content_list.json b/data/2025/2504_12xxx/2504.12459/9956eeb3-00c8-414d-bf51-c94c41a6c788_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..183296af19d5626977453a5ef7242a66541878f5 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/9956eeb3-00c8-414d-bf51-c94c41a6c788_content_list.json @@ -0,0 +1,2123 @@ +[ + { + "type": "text", + "text": "ON LINEAR REPRESENTATIONS AND PRETRAINING DATA FREQUENCY IN LANGUAGE MODELS", + "text_level": 1, + "bbox": [ + 171, + 99, + 823, + 148 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jack Merullo $^{\\diamond}$ Noah A. 
Smith $^{\\text{♣}}$ Sarah Wiegreffe $^{\\text{♥♣}}$ Yanai Elazar $^{\\text{♥♣}}$",
For instance, it has been shown that for specific tasks, models perform better on instances containing higher frequency terms than lower frequency ones (Razeghi et al., 2022; Mallen et al., 2023; McCoy et al., 2024). However, the ways in which frequency affects the internal representations of LMs to cause this difference in performance remain unclear. We connect dataset statistics to recent work in interpretability, which focuses on the emergence of simple linear representations of factual relations in LMs (Hernandez et al., 2024; Chanin et al., 2024). Our findings demonstrate a strong correlation between these linear representations and the frequency of terms in the pretraining corpus.",
This open question is a central focus of our investigation.", + "bbox": [ + 169, + 234, + 826, + 363 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Whether linear representations for \"common\" concepts are more prevalent in models or simply easier to identify (using current methods) than those for less common concepts remains unclear. We hypothesize that factual relations exhibiting linear representations are correlated with higher mention frequencies in the pretraining data (as has been shown with static embeddings, see Ethayarajh et al., 2019), which we confirm in Section 4. Our results also indicate that this can occur at any point in pretraining, as long as a certain average frequency is reached across subject-object pairs in a relation. In order to count the appearance of terms in data corpora throughout training, we develop an efficient tool for counting tokens in tokenized batches of text, which we release to support future work in this area. We also explore whether the presence of linear representations can provide insights into relation term frequency. In Section 5, we fit a regression model to predict the frequency of individual terms (such as \"The Beatles\") in the pretraining data, based on metrics measuring the presence of a linear representation for some relation. For example, how well a linear transformation approximates the internal computation of the \"lead-singer-of\" relation mapping \"John Lennon\" to \"The Beatles\" can tell us about the frequency of those terms in the pretraining corpus.", + "bbox": [ + 169, + 368, + 826, + 564 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our findings indicate that the predictive signal, although approximate, is much stronger than that encoded in log probabilities and task accuracies alone, allowing us to estimate the frequencies of held-out relations and terms within approximate ranges. Importantly, this regression model generalizes beyond the specific LM it was trained on without additional supervision. This provides a valuable foundation for analyzing the pretraining corpora of closed-data models with open weights.", + "bbox": [ + 169, + 569, + 826, + 642 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To summarize, in this paper we show that:", + "bbox": [ + 171, + 646, + 454, + 661 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. The development of linear representations for factual recall relations in LMs is related to frequency as well as model size.", + "2. Linear representations form at predictable frequency thresholds during training, regardless of when this frequency threshold is met for the nouns in the relation. The formation of these representations also correlates strongly with recall accuracy.", + "3. Measuring the extent to which a relation is represented linearly in a model allows us to predict the approximate frequencies of individual terms in the pretraining corpus of that model, even when we do not have access to the model's training data.", + "4. We release a tool for accurately and efficiently searching through tokenized text to support future research on training data." 
+ ], + "bbox": [ + 207, + 684, + 823, + 905 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/bba2f19318fa711c44305749d8472200aa8f1ab50e866a7ac5021954af5081a2.jpg", + "image_caption": [ + "Figure 1: Overview of this work. Given a dataset of subject-relation-object factual relation triplets, we count subject-object co-occurrences throughout pretraining batches. We then measure how well the corresponding relations are represented within an LM across pretraining steps, using the Linear Relational Embeddings (LRE) method from Hernandez et al. (2024). We establish a strong relationship between average co-occurrence frequency and a model's tendency to form linear representations for relations. From this, we show that we can predict frequencies in the pretraining corpus" + ], + "image_footnote": [], + "bbox": [ + 254, + 102, + 730, + 333 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 BACKGROUND", + "text_level": 1, + "bbox": [ + 171, + 458, + 328, + 474 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 LINEAR REPRESENTATIONS", + "text_level": 1, + "bbox": [ + 171, + 491, + 408, + 505 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Vector space models have a long history in language processing, where geometric properties of these spaces were used to encode semantic information (Salton et al., 1975; Paccanaro & Hinton, 2001). When and why linear structure emerges without explicit bias has been of considerable interest since the era of static word embeddings. Work on skipgram models (Mikolov et al., 2013a) found that vector space models of language learn regularities which allow performing vector arithmetic between word embeddings to calculate semantic relationships (e.g., France - Paris + Spain = Madrid) (Mikolov et al., 2013b; Pennington et al., 2014). This property was subject to much debate, as it was not clear why word analogies would appear for some relations and not others (Köper et al., 2015; Karpinska et al., 2018; Gladkova et al., 2016). Followup work showed that linguistic regularities form in static embeddings for relations under specific dataset frequency constraints for relevant terms (Ethayarajh et al., 2019), but does not clearly relate to how modern LMs learn. More recently, there has been renewed interest in the presence of similar linear structure in models with contextual embeddings like transformer language models (Park et al., 2024; Jiang et al., 2024; Merullo et al., 2024). As a result, there are many ways to find and test for linear representations in modern LMs, though the relationship to pretraining data was not addressed (Huben et al., 2024; Gao et al., 2025; Templeton et al., 2024; Rimsky et al., 2024; Todd et al., 2024; Hendel et al., 2023; Hernandez et al., 2024; Chanin et al., 2024). Many of these share similarities in how they compute and test for linear representations. We focus on a particular class of linear representations called Linear Relational Embeddings (LREs) (Paccanaro & Hinton, 2001).", + "bbox": [ + 169, + 517, + 826, + 782 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Linear Relational Embeddings (LREs) Hernandez et al. 
(2024) use a particular class of linear representation called a Linear Relational Embedding (Paccanaro & Hinton, 2001) to approximate, as an affine transformation, the computation a model performs to predict the objects that complete common subject-relation-object triplets. This transform is calculated from a hidden state $\\mathbf{s}$, the subject token representation at some middle layer of the model, to $\\mathbf{o}$, the hidden state at the last token position and layer of the model (i.e., the final hidden state that decodes a token in an autoregressive transformer) within a natural language description of the relation. For example, given the input sequence "Miles Davis (subject) plays the (relation)", the goal is to approximate the computation of the object "trumpet", assuming the model predicts the object cor-",
For a relation like \"instrument-played-by-musician\", the model may see four examples (in the form \"[X] plays the [Y]\") and on the fifth example, when predicting e.g., \"trumpet\" from \"Miles Davis plays the\", the subject representation s and object representation o are extracted.", + "bbox": [ + 169, + 375, + 823, + 444 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2 INFERRING TRAINING DATA FROM MODELS", + "text_level": 1, + "bbox": [ + 171, + 463, + 521, + 478 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "There has been significant interest recently in understanding the extent to which it is possible to infer the training data of a fully trained neural network, including LMs, predominantly by performing membership inference attacks (Shokri et al., 2017; Carlini et al., 2022), judging memorization of text (Carlini et al., 2023; Oren et al., 2024; Shi et al., 2024), or inferring the distribution of data sources (Hayase et al., 2024; Ateniese et al., 2015; Suri & Evans, 2022). Our work is related in that we find hints of the pretraining data distribution in the model itself, but focus on how linear structure in the representations relates to training data statistics.", + "bbox": [ + 169, + 489, + 826, + 589 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 METHODS", + "text_level": 1, + "bbox": [ + 171, + 611, + 292, + 626 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our analysis is twofold: counts of terms in the pretraining corpus of LMs, and measurements of how well factual relations are approximated by affine transformations. We use the OLMo model v1.7 (0424 7B and 0724 1B) (Groeneveld et al., 2024) and GPT-J (6B) (Wang & Komatsuzaki, 2021) and their corresponding datasets: Dolma (Soldaini et al., 2024) and the Pile (Gao et al., 2020), respectively. To understand how these features form over training time, we test eight model checkpoints throughout training in the OLMo family of models (Groeneveld et al., 2024).", + "bbox": [ + 169, + 643, + 826, + 729 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 LINEAR RELATIONAL EMBEDDINGS (LRES) IN LMS", + "text_level": 1, + "bbox": [ + 171, + 747, + 581, + 762 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We use a subset of the RELATIONS dataset Hernandez et al. (2024), focusing on the 25 factual relations of the dataset, such as capital-city and person-mother (complete list in Appendix B). Across these relations, there are 10,488 unique subjects and objects. Following Hernandez et al. (2024), we fit an LRE for each relation on 8 examples from that relation, each with a 5-shot prompt. We use the approach from this work as described in Section 2.1.", + "bbox": [ + 169, + 773, + 823, + 844 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "For the analysis, we drop \"landmark-on-continent\" because $74\\%$ of the answers are Antarctica, making it potentially confounding for extracting a representation for the underlying relation. 
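To make the estimation concrete, here is a minimal, self-contained PyTorch sketch of Equation 1. The two-layer MLP is only a stand-in for $F(\mathbf{s}, c)$ (in the actual method, $\mathbf{s}$ is a middle-layer subject hidden state and $F$ re-runs the LM through to the final hidden state), and the fixed `beta` is a placeholder, since the paper tunes $\beta$ per relation:

```python
import torch
from torch.func import jacrev  # requires torch >= 2.0

torch.manual_seed(0)
d = 64  # toy hidden size

# Stand-in for F(s, c): the LM's computation from the subject hidden state s
# to the final hidden state o. A real implementation would patch s into the
# model and re-run the forward pass; this MLP is only a toy proxy.
F = torch.nn.Sequential(torch.nn.Linear(d, d), torch.nn.GELU(), torch.nn.Linear(d, d))

def fit_lre(subject_states, beta=2.5):
    """Estimate the LRE (W, b) of Eq. 1: average the Jacobian dF/ds and the
    bias term F(s_i) - (dF/ds) s_i over n example subjects (n = 8)."""
    Ws, bs = [], []
    for s in subject_states:
        J = jacrev(F)(s)          # dF/ds evaluated at this example
        Ws.append(J)
        bs.append(F(s) - J @ s)   # first-order bias term
    return beta * torch.stack(Ws).mean(0), torch.stack(bs).mean(0)

W, b = fit_lre([torch.randn(d) for _ in range(8)])
o_hat = W @ torch.randn(d) + b    # apply the LRE to a new subject state
```

Under the metrics described below, the approximation is "faithful" for a subject when decoding `o_hat` yields the same object token as the model's own forward pass.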
Factual relations are much easier to get accurate counts for, so we leave non-factual relations for future work (e.g., although LMs associate the \"pilot\" occupation with men, this relation does not map to the word \"man\" the way \"France\" maps to \"Paris\"; see §3.2).", + "bbox": [ + 169, + 859, + 826, + 925 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Fitting LREs Hernandez et al. (2024) find that Equation 1 underestimates the optimal slope of the linear transformation, so they scale each relation's $W$ by a scalar hyperparameter $\\beta$ . Unlike the original work, which finds one $\\beta$ per model, we use one $\\beta$ per relation, as this avoids disadvantageing specific relations. Another difference in our calculation of LREs is that we do not impose the constraint that the model has to predict the answer correctly to be used as one of the 8 examples used to approximate the Jacobian Matrix. Interestingly, using examples that models predict incorrectly to fit Equation 1 works as well as using only correct examples. We opt to use this variant as it allows us to compare different checkpoints and models (\\$4) with linear transformations trained on the same 8 examples, despite the fact that the models make different predictions on these instances. We explore the effect of example choice in Appendix B and find that it does not make a significant difference. We also explore the choice of layer in Appendix C.", + "bbox": [ + 169, + 103, + 826, + 257 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Metrics To evaluate the quality of LREs, Hernandez et al. (2024) introduce two metrics that measure the quality of the learned transformations. Faithfulness measures whether the transformation learned by the LRE produces the same object token prediction as the original LM. Causality measures the proportion of the time a prediction of an object can be changed to the output of a different example from the relation (e.g., editing the Miles Davis subject representation so that the LM predicts he plays the guitar, instead of the trumpet). For specifics on implementation, we refer the reader to Hernandez et al. (2024). We consider an LRE to be high 'quality' when it scores highly on these metrics, as this measures when an LRE works across subject-object pairs within the relation. In general, we prefer to use causality in our analysis, as faithfulness can be high when LMs predict the same token very often (like in early checkpoints).", + "bbox": [ + 169, + 286, + 826, + 428 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 COUNTING FREQUENCIES THROUGHOUT TRAINING", + "text_level": 1, + "bbox": [ + 171, + 455, + 576, + 472 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "A key question we explore is how term frequencies affect the formation of linear representations. We hypothesize that more commonly occurring relations will lead to higher quality LREs for those relations. Following Elsahar et al. (2018); Elazar et al. (2022), we count an occurrence of a relation when a subject and object co-occur together. While term co-occurrence is used as a proxy for the frequency of the entire triplet mentioned in text, Elsahar et al. (2018) show that this approximation is quite accurate. 
We now discuss how to compute these co-occurrence counts.", + "bbox": [ + 169, + 488, + 823, + 574 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "What's in My Big Data? (WIMBD) Elazar et al. (2024) index many popular pretraining datasets, including Dolma (Soldaini et al., 2024) and the Pile (Gao et al., 2020), and provide search tools that allow for counting individual terms and co-occurrences within documents. However, this only gives us counts for the full dataset. Since we are interested in counting term frequencies throughout pretraining, we count these within training batches of OLMo instead. When per-batch counts are not available, WIMBD offers a good approximation for final checkpoints, which is what we do in the case of GPT-J. We compare WIMBD co-occurrence counts to the Batch Search method (described below) for the final checkpoint of OLMo in Appendix D, and find that the counts are extremely close: The slope of the best fit line for BatchCount against WIMBDCount is .94, because co-occurrence counts are overestimated when considering the whole document.", + "bbox": [ + 169, + 602, + 826, + 742 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Batch Search Data counting tools cannot typically provide accurate counts for model checkpoints at arbitrary training steps. Thus, we design a tool to efficiently count exact co-occurrences within sequences of tokenized batches. This also gives us the advantage of counting in a way that is highly accurate to how LMs are trained; since LMs are trained on batches of fixed lengths which often split documents into multiple sequences, miscounts may occur unless using tokenized sequences. Using this method, we note every time one of our 10k terms appears throughout a dataset used to pretrain an LM. We count a co-occurrence as any time two terms appear in the same sequence within a batch (a (batch-size, sequence-length) array). We search 10k terms in the approximately 2T tokens of Dolma (Soldaini et al., 2024) this way. Using our implementation, we are able to complete this on 900 CPUs in about a day. To support future work, we release our code as Cython bindings that integrate out of the box with existing libraries.", + "bbox": [ + 169, + 771, + 826, + 925 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/992f1844b981d1812454b47d5c56dafde0f907001c4704149adb3c14d065df3a.jpg", + "image_caption": [ + "OLMo-7B 0424 Development of LREs over Training Time" + ], + "image_footnote": [ + "Final Model" + ], + "bbox": [ + 176, + 126, + 483, + 224 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/e1028e25c5571fc63b95029cead2ac2fe6d2c38efd73e2bba131960e2b0ae469.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
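As a pure-Python illustration of this counting scheme (the released tool implements it in Cython for speed), the sketch below counts a co-occurrence whenever two tracked token sequences appear in the same packed training sequence; the term names and token ids are invented:

```python
from collections import Counter
from itertools import combinations

def contains(seq, ids):
    """True if the token-id tuple `ids` occurs contiguously in `seq`."""
    n = len(ids)
    return any(tuple(seq[i:i + n]) == ids for i in range(len(seq) - n + 1))

def cooccurrence_counts(batches, terms):
    """Count, per term pair, the number of training sequences containing
    both terms. `batches` yields (batch_size, seq_len) token-id arrays;
    `terms` maps a surface form to its token-id tuple."""
    counts = Counter()
    for batch in batches:          # one pretraining step of data
        for seq in batch:          # one fixed-length packed sequence
            hits = [t for t, ids in terms.items() if contains(seq, ids)]
            counts.update(combinations(sorted(hits), 2))
    return counts

# Toy usage with made-up token ids.
terms = {"France": (1001, 7), "Paris": (5050,)}
batch = [[3, 1001, 7, 2, 5050, 9], [5050, 4, 4, 4, 4, 4]]
print(cooccurrence_counts([batch], terms))  # Counter({('France', 'Paris'): 1})
```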
<table><tr><td>Model</td><td>Co-Occurrence Threshold (Mean Causality > .9)</td></tr><tr><td>GPT-J (6B)</td><td>1,097</td></tr><tr><td>OLMo-7B</td><td>1,998</td></tr><tr><td>OLMo-1B</td><td>4,447</td></tr></table>
", + "bbox": [ + 532, + 142, + 820, + 215 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ad620ec86229e5de638a565490e5d6940af4a72fde26a3c5f92185684ac231db.jpg", + "image_caption": [ + "OLMo-1B 0724 Development of LREs over Training Time" + ], + "image_footnote": [], + "bbox": [ + 176, + 253, + 483, + 354 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/1cd813e2fb83f1b4351e3155ec2db6833f96e31dd0f85d6bd6bd9a6cf6a6115e.jpg", + "image_caption": [ + "GPT-J Development of LREs over Training Time", + "Figure 2: We find that LREs have consistently high causality scores across relations after some average frequency threshold is reached (table, top right). In OLMo models, red dots show the model's LRE performance at 41B tokens, and blue dots show the final checkpoint performance (550k steps in 7B). Gray dots show intermediate checkpoints. We highlight Even at very early training steps, if the average subject-object cooc. count is high enough, the models are very likely to already have robust LREs formed in the representation space. Symbols represent different relations. Highlighted relations are shown in darker lines." + ], + "image_footnote": [ + "41B Tokens (10k steps)" + ], + "bbox": [ + 506, + 255, + 810, + 354 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 FREQUENCY OF SUBJECT-OBJECT CO-OCCURRENCES ALIGNS WITH EMERGENCE OF LINEAR REPRESENTATIONS", + "text_level": 1, + "bbox": [ + 169, + 474, + 777, + 508 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we explore when LREs begin to appear at training time and how these are related to pretraining term frequencies. Our main findings are that (1) average co-occurrence frequency within a relation strongly correlates with whether an LRE will form; (2) the frequency effect is independent of the pretraining stage; if the average subject-object co-occurrence for a relation surpasses some threshold, it is very likely to have a high-quality LRE, even for early pretraining steps.", + "bbox": [ + 169, + 526, + 823, + 597 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 SETUP", + "text_level": 1, + "bbox": [ + 171, + 614, + 263, + 628 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Using the factual recall relations from the Hernandez et al. (2024) dataset, we use the Batch Search method (§3.2) to count subject and object co-occurrences within sequences in Dolma (Soldaini et al., 2024) used to train the OLMo-1B (v. 0724) and 7B (v. 0424) models (Groeneveld et al., 2024). The OLMo family of models provides tools for accurately recreating the batches from Dolma, which allow us to reconstruct the data the way the model was trained. We also use GPT-J (Wang & Komatsuzaki, 2021) and the Pile (Gao et al., 2020) as its training data, but since we do not have access to accurate batches used to train it, we use WIMBD (Elazar et al., 2024) to count subject-object counts in the entire data. We fit LREs on each relation and model separately. Hyperparameter sweeps are in Appendix C. OLMo also releases intermediate checkpoints, which we use to track development over pretraining time. 
We use checkpoints that have seen {41B, 104B, 209B, 419B, 628B, 838B, 1T, and 2T} tokens.$^{3}$ We use the Pearson coefficient for measuring correlation.",
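As a concrete (toy) example of the correlation measurement, with invented per-relation numbers, and log-scaled counts as our illustrative choice given that co-occurrence counts span several orders of magnitude:

```python
import numpy as np
from scipy.stats import pearsonr

# Invented stand-ins: per-relation mean subject-object co-occurrence count
# and the causality score of the LRE fit for that relation.
mean_cooc = np.array([21, 350, 1100, 4500, 21000])
causality = np.array([0.44, 0.60, 0.81, 0.93, 0.96])

r, p = pearsonr(np.log10(mean_cooc), causality)
print(f"Pearson r = {r:.2f} (p = {p:.3g})")
```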
We find that in some cases causality increases before few-shot accuracy does: for example, "food-from-country" reaches a causality of $65\\%$ when its 5-shot accuracy is only $42\\%$. This gap is consistently closed through training. In the final model, causality and 5-shot accuracy are within $11\\%$ of each other on average. We report the relationship between every relation, zero-shot, and few-shot accuracy for OLMo models across training in Appendix F.",
The other set of features includes the first, as well as the faithfulness and causality measurements.",
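A minimal sketch of this regression setup follows, with random stand-in features and targets; in the real setup the columns are the LM and LRE measurements named above, the target is the term frequency in natural-log space, and evaluation holds out whole relations rather than a random slice:

```python
import numpy as np
from sklearn.ensemble import RandomForestRegressor

rng = np.random.default_rng(0)

# One row per (subject, relation, object) example. Columns 0-1 mimic the
# LM-only features (answer log prob, 5-shot accuracy); columns 2-5 mimic the
# LRE features (faithfulness, faith prob., causality, hard causality).
X = rng.random((500, 6))
y = rng.uniform(0, 14, 500)  # target: ln(frequency) of the term

reg = RandomForestRegressor(n_estimators=100, random_state=0)
reg.fit(X[:400], y[:400])

# Within-magnitude accuracy: a prediction counts as correct if it lands
# within one order of magnitude, i.e., within ln(10) in natural-log space.
pred = reg.predict(X[400:])
print(f"within-magnitude acc: {np.mean(np.abs(pred - y[400:]) <= np.log(10)):.2f}")
```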
We perform feature permutation tests to see how much each feature (LRE features and log probs) contributes to the final answer. First, we check which of the features used to fit the regression are correlated, since if they are, perturbing one still leaves the signal present in another. In Appendix E, we show that only faithfulness and faith probability are strongly correlated, so for this test only, we train models with a single PCA component representing $89\\%$ of the variance of those two features. We find that hard causality is by far the most important feature for generalization performance, causing a difference of about $15\\%$ accuracy, followed by the faithfulness measures at about $5\\%$, providing evidence that the LRE features encode an important signal.",
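The permutation test itself can be run with scikit-learn's `permutation_importance`, sketched here on the same kind of toy regressor as above; the feature labels are ours, and the PCA merge of the two correlated faithfulness features is omitted for brevity:

```python
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn.inspection import permutation_importance

rng = np.random.default_rng(0)
X, y = rng.random((500, 6)), rng.uniform(0, 14, 500)  # toy stand-in data
reg = RandomForestRegressor(n_estimators=100, random_state=0).fit(X[:400], y[:400])

# Shuffle one feature at a time on held-out rows; the drop in R^2 relative
# to the unshuffled score estimates that feature's contribution.
res = permutation_importance(reg, X[400:], y[400:], n_repeats=10, random_state=0)
labels = ["log prob", "5-shot acc", "faithfulness", "faith prob.",
          "causality", "hard causality"]
for name, imp in sorted(zip(labels, res.importances_mean), key=lambda t: -t[1]):
    print(f"{name:>15}: {imp:+.3f}")
```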
<table><tr><td rowspan="2">Model</td><td colspan="2">Predicting Object Occs.</td><td colspan="2">Predicting Subject-Object Co-Occs.</td></tr><tr><td>Eval. on GPT-J</td><td>Eval. on OLMo</td><td>Eval. on GPT-J</td><td>Eval. on OLMo</td></tr><tr><td>LRE Features</td><td>0.65±0.12</td><td>0.49±0.12</td><td>0.76±0.12</td><td>0.68±0.08</td></tr><tr><td>LogProb Features</td><td>0.42±0.10</td><td>0.41±0.09</td><td>0.66±0.09</td><td>0.60±0.07</td></tr><tr><td>Mean Freq. Baseline</td><td>0.31±0.15</td><td>0.41±0.17</td><td>0.57±0.15</td><td>0.67±0.16</td></tr></table>
", + "bbox": [ + 176, + 183, + 820, + 263 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "the regression model, fit for example on OLMo (\"Train OLMo\" setting), to features extracted from GPT-J, using ground truth counts from The Pile (and vice versa, i.e., the \"Train GPT-J\" setting).", + "bbox": [ + 169, + 277, + 823, + 308 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We again train a random forest regression model to predict the frequency of terms (either the subject-object frequency, or the object frequency alone; e.g., predicting \"John Lennon\" and \"The Beatles\" or just \"The Beatles\") on features from one of two models: either OLMo-7B (final checkpoint) or GPT-J, treating the other as the 'closed' model. We test the hypothesis that LRE features (faithfulness, causality) are useful in predicting term frequencies across different models, with the hope that this could be applied to dataset inference methods in the future, where access to the ground truth pretraining data counts is limited or unavailable.", + "bbox": [ + 169, + 313, + 826, + 412 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Results Our results are presented in Table 1. First, we find that there is a signal in the LRE features that does not exist in the log probability features: We are able to fit a much better generalizable model when using LRE features as opposed to the LM probabilities alone. Second, evaluating on the LRE features of a heldout model (scaled by the ratio of total tokens trained between the two models) maintains around the same accuracy when fit on exact counts from OLMo, allowing us to predict occurrences without access to the GPT-J pretraining data. We find that predicting either the subject-object co-occurrences or object frequencies using LREs alone is barely better than the baseline. This task is much more difficult than predicting the frequency of the object alone, but our model may just also be unable to account for outliers in the data, which is tightly clustered around the mean (thus giving the high mean baseline performance of between approx. $60 - 70\\%$ ). Nevertheless, we show that linear structure for relations within LM representations encode a rich signal representing dataset frequency.", + "bbox": [ + 169, + 433, + 826, + 602 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5.4 ERROR ANALYSIS", + "text_level": 1, + "bbox": [ + 171, + 623, + 341, + 637 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In Table 2 we show example predictions from our regression model that we fit on OLMo and evaluate on heldout relations with LREs measured on GPT-J. We find that some relations transfer more easily than others, with the star constellation name transferring especially poorly. In general, the regression transfers well, without performance deteriorating much (about $5\\%$ accuracy: see Figure 3 compared to the evaluation of GPT-J in Table 1), suggesting LREs encode information in a consistent way across models. 
We also find that the regression makes use of the full prediction range, producing values in the millions (see Table 2) as well as in the tens; the same regression shown in the table also predicts 59 occurrences for "Caroline Bright" (Will Smith's mother), where the ground truth is 48.",
<table><tr><td colspan="6">Predicting Object Frequency in GPT-J, Regression fit on OLMo</td></tr><tr><td>Relation</td><td>Subject</td><td>Object</td><td>Prediction</td><td>Ground Truth</td><td>Error</td></tr><tr><td>landmark-in-country</td><td>Menangle Park</td><td>Australia</td><td>2,986,989</td><td>3,582,602</td><td>1.2x</td></tr><tr><td>country-language</td><td>Brazil</td><td>Portuguese</td><td>845,406</td><td>561,005</td><td>1.5x</td></tr><tr><td>star-constellation name</td><td>Arcturus</td><td>Boötes</td><td>974,550</td><td>2,817</td><td>346x</td></tr><tr><td>person-mother</td><td>Prince William</td><td>Princess Diana</td><td>5,826</td><td>27,094</td><td>4.6x</td></tr><tr><td>person-mother</td><td>Prince Harry</td><td>Princess Diana</td><td>131</td><td>27,094</td><td>207x</td></tr></table>
", + "bbox": [ + 176, + 196, + 821, + 308 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Linear Representations in LMs The difficulty of disentangling the formation of linear representations from increases in relation accuracy, especially in the few-shot case, is interesting. Across 24 relations, only the \"star-constellation-name\" and \"product-by-company\" relations have few-shot accuracies that far exceed their causality scores (and both are low frequency). Thus, it is still an open question how LMs are able to recall these tasks. The fact that few-shot accuracy and causality seem so closely linked is consistent with findings that ICL involves locating the right task (Min et al., 2022) and applying a 'function' to map input examples to outputs (Hendel et al., 2023; Todd et al., 2024). The finding that frequency controls this ability is perhaps unsurprising, as frequency also controls this linear structure emerging in static embeddings (Ethayarajh et al., 2019). Jiang et al. (2024) prove a strong frequency-based condition (based on matched log-odds between subjects and objects) and an implicit bias of gradient descent (when the frequency condition is not met) encourage linearity in LLMs; our work empirically shows conditions where linear representations tend to form in more realistic settings. If LMs are 'only' solving factual recall or performing ICL through linear structures, it is surprising how well this works at scale, but the simplicity also provides a promising way to understand LMs and ICL in general. An interesting avenue for future work would be to understand if and when LMs use a method that is not well approximated linearly to solve these types of tasks, as recent work has shown non-linearity can be preferred for some tasks in recurrent networks (Csordás et al., 2024).", + "bbox": [ + 169, + 323, + 826, + 575 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Future Work in Predicting Dataset Frequency The ability to predict the contents of pretraining data is an important area for investigating memorization, contamination, and privacy of information used to train models. In our approach, we show it is possible to extract pretraining data signal without direct supervision. Without interpretability work on the nature of representations in LMs, we would not know of this implicit dataset signal, and we argue that interpretability can generate useful insights more broadly as well. Extensions on this work could include more information to tighten the prediction bounds on frequency, such as extracting additional features from the tokenizer (Hayase et al., 2024). We hope this work encourages future research in other ways properties of pretraining data affect LM representations for both improving and better understanding these models.", + "bbox": [ + 169, + 601, + 823, + 728 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "7 CONCLUSION", + "text_level": 1, + "bbox": [ + 171, + 760, + 320, + 776 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We find a connection between linear representations of subject-relation-object factual triplets in LMs and the pretraining frequencies of the subjects and objects in those relations. This finding can guide future interpretability work in deciphering whether a linear representation for a given concept will exist in a model, since we observe that frequencies below a certain threshold for a given model will not yield LREs (a particular class of linear representation). 
From there we show that we can use the presence of linear representations to predict with some accuracy the frequency of terms in the pretraining corpus of an open-weights, closed-data model without supervision. Future work could aim to improve on our bounds of predicted frequencies. Overall, our work presents a meaningful step towards understanding the interactions between pretraining data and internal LM representations.", + "bbox": [ + 169, + 797, + 823, + 924 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 946, + 509, + 959 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "ACKNOWLEDGMENTS", + "text_level": 1, + "bbox": [ + 171, + 103, + 356, + 118 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "This work was performed while JM was an intern at Ai2. We thank the anonymous reviewers and members of the Aristo and AllenNLP teams at Ai2 for valuable feedback.", + "bbox": [ + 171, + 133, + 823, + 162 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 173, + 186, + 285, + 200 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Viraat Aryabumi, Yixuan Su, Raymond Ma, Adrien Morisot, Ivan Zhang, Acyr Locatelli, Marzieh Fadaee, Ahmet Üstün, and Sara Hooker. To code or not to code? exploring impact of code in pretraining. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=zSfeN1uAcx.", + "Giuseppe Ateniese, Luigi V Mancini, Angelo Spognardi, Antonio Villani, Domenico Vitali, and Giovanni Felici. Hacking smart machines with smarter ones: How to extract meaningful data from machine learning classifiers. International Journal of Security and Networks, 10(3):137-150, 2015. URL https://dl.acm.org/doi/10.1504/IJSN.2015.071829.", + "Sid Black, Lee Sharkey, Leo Grinsztajn, Eric Winsor, Dan Braun, Jacob Merizian, Kip Parker, Carlos Ramón Guevara, Beren Millidge, Gabriel Alfour, and Connor Leahy. Interpreting neural networks through the polytope lens, 2022. URL https://arxiv.org/abs/2211.12312.", + "Nicholas Carlini, Steve Chien, Milad Nasr, Shuang Song, Andreas Terzis, and Florian Tramér. Membership inference attacks from first principles. In 2022 IEEE Symposium on Security and Privacy (SP), pp. 1897-1914, 2022. URL https://ieeexplore.ieee.org/document/9833649/.", + "Nicholas Carlini, Daphne Ippolito, Matthew Jagielski, Katherine Lee, Florian Tramer, and Chiyuan Zhang. Quantifying memorization across neural language models. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=TatRHT_1cK.", + "Hoyeon Chang, Jinho Park, Seonghyeon Ye, Sohee Yang, Youngkyung Seo, Du-Seong Chang, and Minjoon Seo. How do large language models acquire factual knowledge during pretraining? In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. URL https://openreview.net/forum?id=TYdzj1EvBP.", + "David Chanin, Anthony Hunter, and Oana-Maria Camburu. Identifying Linear Relational Concepts in Large Language Models. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 1524-1535. 
Association for Computational Linguistics, 2024. doi: 10.18653/v1/2024.naacl-long.85. URL https://aclanthology.org/2024.naacl-long.85.", + "Róbert Csordás, Christopher Potts, Christopher D Manning, and Atticus Geiger. Recurrent neural networks learn to store and generate sequences using non-linear representations. In Yonatan Belinkov, Najoung Kim, Jaap Jumelet, Hosein Mohebbi, Aaron Mueller, and Hanjie Chen (eds.), Proceedings of the 7th BlackboxNLP Workshop: Analyzing and Interpreting Neural Networks for NLP, pp. 248-262, Miami, Florida, US, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.blackboxnlp-1.17. URL https://aclanthology.org/2024.blackboxnlp-1.17/.", + "Yanai Elazar, Shauli Ravfogel, Alon Jacovi, and Yoav Goldberg. Amnesic Probing: Behavioral Explanation with Amnesic Counterfactuals. Transactions of the Association for Computational Linguistics, 9:160-175, 03 2021. URL https://doi.org/10.1162/tacl_a_00359.", + "Yanai Elazar, Nora Kassner, Shauli Ravfogel, Amir Feder, Abhilasha Ravichander, Marius Mosbach, Yonatan Belinkov, Hinrich Schütze, and Yoav Goldberg. Measuring causal effects of data statistics on language model's 'factual' predictions. arXiv preprint arXiv:2207.14251, 2022. URL https://arxiv.org/abs/2207.14251." + ], + "bbox": [ + 173, + 209, + 825, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yanai Elazar, Akshita Bhagia, Ian Helgi Magnusson, Abhilasha Ravichander, Dustin Schwenk, Alane Suhr, Evan Pete Walsh, Dirk Groeneveld, Luca Soldaini, Sameer Singh, Hannaneh Hajishirzi, Noah A. Smith, and Jesse Dodge. What's in my big data? In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=RvfPnOkPV4.", + "Nelson Elhage, Neel Nanda, Catherine Olsson, Tom Henighan, Nicholas Joseph, Ben Mann, Amanda Askell, Yuntao Bai, Anna Chen, Tom Conerly, et al. A mathematical framework for transformer circuits. Transformer Circuits Thread, 2021. URL https://transformer-circuits.pub/2021/framework/index.html.", + "Hady Elsahar, Pavlos Vougiouklis, Arslen Remaci, Christophe Gravier, Jonathon Hare, Frederique Laforest, and Elena Simperl. T-REx: A large scale alignment of natural language with knowledge base triples. In Nicoletta Calzolari, Khalid Choukri, Christopher Cieri, Thierry Declerck, Sara Goggi, Koiti Hasida, Hitoshi Isahara, Bente Maegaard, Joseph Mariani, Hélène Mazo, Asuncion Moreno, Jan Odijk, Stelios Piperidis, and Takenobu Tokunaga (eds.), Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan, May 2018. European Language Resources Association (ELRA). URL https://aclanthology.org/L18-1544.", + "Kawin Ethayarajh, David Duvenaud, and Graeme Hirst. Towards Understanding Linear Word Analogies. In Anna Korhonen, David Traum, and Lluís Márquez (eds.), Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 3253-3262. Association for Computational Linguistics, 2019. doi: 10.18653/v1/P19-1315. URL https://aclanthology.org/P19-1315.", + "Leo Gao, Stella Biderman, Sid Black, Laurence Golding, Travis Hoppe, Charles Foster, Jason Phang, Horace He, Anish Thite, Noa Nabeshima, et al. 
The Pile: An 800GB dataset of diverse text for language modeling. arXiv preprint arXiv:2101.00027, 2020. URL https://arxiv.org/abs/2101.00027.",
+ "Leo Gao, Tom Dupre la Tour, Henk Tillman, Gabriel Goh, Rajan Troll, Alec Radford, Ilya Sutskever, Jan Leike, and Jeffrey Wu. Scaling and evaluating sparse autoencoders. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=tcsZt9ZNKD.",
+ "Shivam Garg, Dimitris Tsipras, Percy Liang, and Gregory Valiant. What can transformers learn in-context? A case study of simple function classes. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=f1NZJ2eOet.",
+ "Anna Gladkova, Aleksandr Drozd, and Satoshi Matsuoka. Analogy-based detection of morphological and semantic relations with word embeddings: what works and what doesn't. In Jacob Andreas, Eunsol Choi, and Angeliki Lazaridou (eds.), Proceedings of the NAACL Student Research Workshop, pp. 8-15, San Diego, California, June 2016. Association for Computational Linguistics. doi: 10.18653/v1/N16-2002. URL https://aclanthology.org/N16-2002/.",
+ "Dirk Groeneveld, Iz Beltagy, Evan Walsh, Akshita Bhagia, Rodney Kinney, Oyvind Tafjord, Ananya Jha, Hamish Ivison, Ian Magnusson, Yizhong Wang, et al. OLMo: Accelerating the science of language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 15789-15809, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.841. URL https://aclanthology.org/2024.acl-long.841/.",
+ "Jonathan Hayase, Alisa Liu, Yejin Choi, Sewoong Oh, and Noah A. Smith. Data mixture inference: What do BPE tokenizers reveal about their training data? In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. URL https://openreview.net/forum?id=EHXyeImux0.",
+ "Roee Hendel, Mor Geva, and Amir Globerson. In-Context Learning Creates Task Vectors. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational"
+ ],
+ "bbox": [
+ 171,
+ 102,
+ 826,
+ 925
+ ],
+ "page_idx": 11
+ },
+ {
+ "type": "header",
+ "text": "Published as a conference paper at ICLR 2025",
+ "bbox": [
+ 171,
+ 32,
+ 478,
+ 47
+ ],
+ "page_idx": 11
+ },
+ {
+ "type": "page_number",
+ "text": "12",
+ "bbox": [
+ 490,
+ 946,
+ 509,
+ 960
+ ],
+ "page_idx": 11
+ },
+ {
+ "type": "list",
+ "sub_type": "ref_text",
+ "list_items": [
+ "Linguistics: EMNLP 2023, pp. 9318-9333. Association for Computational Linguistics, 2023. doi: 10.18653/v1/2023.findings-emnlp.624. URL https://aclanthology.org/2023.findings-emnlp.624.",
+ "Evan Hernandez, Arnab Sen Sharma, Tal Haklay, Kevin Meng, Martin Wattenberg, Jacob Andreas, Yonatan Belinkov, and David Bau. Linearity of relation decoding in transformer language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=w7LU2s14kE.",
+ "Robert Huben, Hoagy Cunningham, Logan Riggs Smith, Aidan Ewart, and Lee Sharkey. Sparse autoencoders find highly interpretable features in language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=F76bwRSLeK.",
+ "Yibo Jiang, Goutham Rajendran, Pradeep Kumar Ravikumar, Bryon Aragam, and Victor Veitch. 
On the origins of linear representations in large language models. In *Forty-first International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=otuTw4Mghk.", + "Marzena Karpinska, Bofang Li, Anna Rogers, and Aleksandr Drozd. Subcharacter information in Japanese embeddings: When is it worth it? In Georgiana Dinu, Miguel Ballesteros, Avirup Sil, Sam Bowman, Wael Hamza, Anders Sogaard, Tahira Naseem, and Yoav Goldberg (eds.), Proceedings of the Workshop on the Relevance of Linguistic Structure in Neural Architectures for NLP, pp. 28-37, Melbourne, Australia, July 2018. Association for Computational Linguistics. doi: 10.18653/v1/W18-2905. URL https://aclanthology.org/W18-2905/.", + "Maximilian Köper, Christian Scheible, and Sabine Schulte im Walde. Multilingual reliability and \"semantic\" structure of continuous word spaces. In Matthew Purver, Mehrnoosh Sadrzadeh, and Matthew Stone (eds.), Proceedings of the 11th International Conference on Computational Semantics, pp. 40-45, London, UK, April 2015. Association for Computational Linguistics. URL https://aclanthology.org/W15-0105/.", + "Shayne Longpre, Gregory Yauney, Emily Reif, Katherine Lee, Adam Roberts, Barret Zoph, Denny Zhou, Jason Wei, Kevin Robinson, David Mimno, and Daphne Ippolito. A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 3245-3276, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.179. URL https://aclanthology.org/2024.naacl-long.179/.", + "Yingwei Ma, Yue Liu, Yue Yu, Yuanliang Zhang, Yu Jiang, Changjian Wang, and Shanshan Li. At which training stage does code data help LLMs reasoning? In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=KIPJKST4gw.", + "Alex Mallen, Akari Asai, Victor Zhong, Rajarshi Das, Daniel Khashabi, and Hannaneh Hajishirzi. When not to trust language models: Investigating effectiveness of parametric and non-parametric memories. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 9802–9822, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.546. URL https://aclanthology.org/2023.acl-long.546.", + "R Thomas McCoy, Shunyu Yao, Dan Friedman, Mathew D Hardy, and Thomas L Griffiths. Embers of autoregression show how large language models are shaped by the problem they are trained to solve. Proceedings of the National Academy of Sciences, 121(41):e2322420121, 2024. URL https://www.pnas.org/doi/abs/10.1073/pnas.2322420121.", + "Jack Merullo, Carsten Eickhoff, and Ellie Pavlick. Language models implement simple Word2Vec-style vector arithmetic. 
In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational"
+ ],
+ "bbox": [
+ 171,
+ 102,
+ 825,
+ 924
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "header",
+ "text": "Published as a conference paper at ICLR 2025",
+ "bbox": [
+ 171,
+ 32,
+ 478,
+ 47
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "page_number",
+ "text": "13",
+ "bbox": [
+ 490,
+ 946,
+ 508,
+ 959
+ ],
+ "page_idx": 12
+ },
+ {
+ "type": "list",
+ "sub_type": "ref_text",
+ "list_items": [
+ "Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 5030-5047, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.281. URL https://aclanthology.org/2024.naacl-long.281.",
+ "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781, 2013a. URL https://arxiv.org/abs/1301.3781.",
+ "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Corrado, and Jeff Dean. Distributed representations of words and phrases and their compositionality. In C.J. Burges, L. Bottou, M. Welling, Z. Ghahramani, and K.Q. Weinberger (eds.), Advances in Neural Information Processing Systems, volume 26. Curran Associates, Inc., 2013b. URL https://proceedings.neurips.cc/paper_files/paper/2013/file/9aa42b31882ec039965f3c4923ce901b-Paper.pdf.",
+ "Sewon Min, Xinxi Lyu, Ari Holtzman, Mikel Artetxe, Mike Lewis, Hannaneh Hajishirzi, and Luke Zettlemoyer. Rethinking the role of demonstrations: What makes in-context learning work? In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 11048-11064, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.759. URL https://aclanthology.org/2022.emnlp-main.759/.",
+ "Chris Olah, Nick Cammarata, Ludwig Schubert, Gabriel Goh, Michael Petrov, and Shan Carter. Zoom in: An introduction to circuits. Distill, 5(3):e00024.001, 2020. URL https://distill.pub/2020/circuits/zoom-in/.",
+ "Yonatan Oren, Nicole Meister, Niladri S. Chatterji, Faisal Ladhak, and Tatsunori Hashimoto. Proving test set contamination in black-box language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=KS8mIvetg2.",
+ "Alberto Paccanaro and Geoffrey E Hinton. Learning Hierarchical Structures with Linear Relational Embedding. In Advances in Neural Information Processing Systems, volume 14. MIT Press, 2001. URL https://papers.nips.cc/paper_files/paper/2001/hash/814a9c18f5abff398787c9cfcbf3d80c-Abstract.html.",
+ "Kiho Park, Yo Joong Choe, and Victor Veitch. The Linear Representation Hypothesis and the Geometry of Large Language Models. In *Forty-First International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=UGpGkLzwpP.",
+ "Jeffrey Pennington, Richard Socher, and Christopher Manning. GloVe: Global vectors for word representation. In Alessandro Moschitti, Bo Pang, and Walter Daelemans (eds.), Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 1532-1543, Doha, Qatar, October 2014. Association for Computational Linguistics. doi: 10.3115/v1/D14-1162. URL https://aclanthology.org/D14-1162.",
+ "Shauli Ravfogel, Yanai Elazar, Hila Gonen, Michael Twiton, and Yoav Goldberg. 
Null it out: Guarding protected attributes by iterative nullspace projection. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 7237-7256, Online, July 2020. Association for Computational Linguistics. URL https://www.aclweb.org/anthology/2020.acl-main.647.",
+ "Yasaman Razeghi, Robert L Logan IV, Matt Gardner, and Sameer Singh. Impact of pretraining term frequencies on few-shot numerical reasoning. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Findings of the Association for Computational Linguistics: EMNLP 2022, pp. 840-854, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.findings-emnlp.59. URL https://aclanthology.org/2022.findings-emnlp.59/.",
+ "Yasaman Razeghi, Hamish Ivison, Sameer Singh, and Yanai Elazar. Backtracking mathematical reasoning of language models to the pretraining data. In NeurIPS Workshop on Attributing Model Behavior at Scale, 2023. URL https://openreview.net/forum?id=EKvqw9k3lC."
+ ],
+ "bbox": [
+ 173,
+ 103,
+ 825,
+ 924
+ ],
+ "page_idx": 13
+ },
+ {
+ "type": "header",
+ "text": "Published as a conference paper at ICLR 2025",
+ "bbox": [
+ 171,
+ 32,
+ 478,
+ 47
+ ],
+ "page_idx": 13
+ },
+ {
+ "type": "page_number",
+ "text": "14",
+ "bbox": [
+ 490,
+ 946,
+ 508,
+ 959
+ ],
+ "page_idx": 13
+ },
+ {
+ "type": "list",
+ "sub_type": "ref_text",
+ "list_items": [
+ "Nina Rimsky, Nick Gabrieli, Julian Schulz, Meg Tong, Evan Hubinger, and Alexander Turner. Steering Llama 2 via contrastive activation addition. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 15504-15522, Bangkok, Thailand, August 2024. doi: 10.18653/v1/2024.acl-long.828. URL https://aclanthology.org/2024.acl-long.828/.",
+ "G. Salton, A. Wong, and C. S. Yang. A vector space model for automatic indexing. Commun. ACM, 18(11):613-620, November 1975. ISSN 0001-0782. doi: 10.1145/361219.361220. URL https://doi.org/10.1145/361219.361220.",
+ "Naomi Saphra and Sarah Wiegreffe. Mechanistic? In Yonatan Belinkov, Najoung Kim, Jaap Jumelet, Hosein Mohebbi, Aaron Mueller, and Hanjie Chen (eds.), Proceedings of the 7th BlackboxNLP Workshop: Analyzing and Interpreting Neural Networks for NLP, pp. 480-498, Miami, Florida, US, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.blackboxnlp-1.30. URL https://aclanthology.org/2024.blackboxnlp-1.30/.",
+ "Preethi Seshadri, Sameer Singh, and Yanai Elazar. The bias amplification paradox in text-to-image generation. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 6367-6384, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.353. URL https://aclanthology.org/2024.naacl-long.353/.",
+ "Weijia Shi, Anirudh Ajith, Mengzhou Xia, Yangsibo Huang, Daogao Liu, Terra Blevins, Danqi Chen, and Luke Zettlemoyer. Detecting pretraining data from large language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=zWqr3MQuNs.",
+ "Reza Shokri, Marco Stronati, Congzheng Song, and Vitaly Shmatikov. Membership inference attacks against machine learning models. In 2017 IEEE Symposium on Security and Privacy (SP), pp. 3-18, 2017. doi: 10.1109/SP.2017.41. URL https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7958568.",
+ "Aviv Slobodkin, Omer Goldman, Avi Caciularu, Ido Dagan, and Shauli Ravfogel. 
The curious case of hallucinatory (un)answerability: Finding truths in the hidden states of over-confident large language models. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 3607-3625, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.220. URL https://aclanthology.org/2023.emnlp-main.220/.",
+ "Luca Soldaini, Rodney Kinney, Akshita Bhagia, Dustin Schwenk, David Atkinson, Russell Authur, Ben Bogin, Khyathi Chandu, Jennifer Dumas, Yanai Elazar, et al. Dolma: an open corpus of three trillion tokens for language model pretraining research. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 15725-15788, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.840. URL https://aclanthology.org/2024.acl-long.840/.",
+ "Nishant Subramani, Nivedita Suresh, and Matthew Peters. Extracting Latent Steering Vectors from Pretrained Language Models. In Smaranda Muresan, Preslav Nakov, and Aline Villavicencio (eds.), Findings of the Association for Computational Linguistics: ACL 2022, pp. 566-581. Association for Computational Linguistics, 2022. doi: 10.18653/v1/2022.findings-acl.48. URL https://aclanthology.org/2022.findings-acl.48.",
+ "Anshuman Suri and David Evans. Formalizing and estimating distribution inference risks. Proceedings on Privacy Enhancing Technologies, 2022. URL https://arxiv.org/abs/2109.06024.",
+ "Adly Templeton, Tom Conerly, Jonathan Marcus, Jack Lindsey, Trenton Bricken, Brian Chen, Adam Pearce, Craig Citro, Emmanuel Ameisen, Andy Jones, et al. Scaling Monosemanticity: Extracting Interpretable Features from Claude 3 Sonnet. Transformer Circuits Thread, 2024. URL https://transformer-circuits.pub/2024/scaling-monosemanticity/index.html."
+ ],
+ "bbox": [
+ 171,
+ 102,
+ 825,
+ 922
+ ],
+ "page_idx": 14
+ },
+ {
+ "type": "header",
+ "text": "Published as a conference paper at ICLR 2025",
+ "bbox": [
+ 171,
+ 32,
+ 478,
+ 47
+ ],
+ "page_idx": 14
+ },
+ {
+ "type": "page_number",
+ "text": "15",
+ "bbox": [
+ 490,
+ 946,
+ 508,
+ 959
+ ],
+ "page_idx": 14
+ },
+ {
+ "type": "list",
+ "sub_type": "ref_text",
+ "list_items": [
+ "Eric Todd, Millicent Li, Arnab Sen Sharma, Aaron Mueller, Byron C Wallace, and David Bau. Function vectors in large language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=AwyxtyMwaG.",
+ "Ben Wang and Aran Komatsuzaki. GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model. https://github.com/kingoflolz/mesh-transformer-jax, May 2021.",
+ "Xinyi Wang, Alfonso Amayuelas, Kexun Zhang, Liangming Pan, Wenhu Chen, and William Yang Wang. Understanding reasoning ability of language models from the perspective of reasoning paths aggregation. In *Forty-first International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=dZsEOFUDew.",
+ "Xinyi Wang, Antonis Antoniades, Yanai Elazar, Alfonso Amayuelas, Alon Albalak, Kexun Zhang, and William Yang Wang. Generalization vs. memorization: Tracing language models' capabilities back to pretraining data. In The Thirteenth International Conference on Learning Representations, 2025. 
URL https://openreview.net/forum?id=IQxBDLmVpT.",
+ "Sang Michael Xie, Hieu Pham, Xuanyi Dong, Nan Du, Hanxiao Liu, Yifeng Lu, Percy Liang, Quoc V Le, Tengyu Ma, and Adams Wei Yu. DoReMi: Optimizing data mixtures speeds up language model pretraining. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=1XuByUeHhd."
+ ],
+ "bbox": [
+ 171,
+ 103,
+ 823,
+ 378
+ ],
+ "page_idx": 15
+ },
+ {
+ "type": "text",
+ "text": "A LIMITATIONS",
+ "text_level": 1,
+ "bbox": [
+ 171,
+ 404,
+ 321,
+ 420
+ ],
+ "page_idx": 15
+ },
+ {
+ "type": "text",
+ "text": "While our approach thoroughly tracks exposure to individual terms and the formation of LRE features across pretraining, we cannot draw causal$^{6}$ claims about how exposure affects individual representations, due to the cost of counterfactual pretraining. We try to address this by showing that the frequency of individual terms can be predicted with some accuracy from measurements of LRE presence. We motivate this approach as a possible way to detect the training data of closed-data LMs; however, we are not able to make any guarantees on its efficacy in settings not shown here, and would caution against drawing strong conclusions without additional information. Furthermore, we find that our method is relatively worse at predicting subject-object co-occurrences than object occurrences, and it does not account for the added difficulty of that task. Future work could expand on this tool by incorporating it with other data inference methods for greater confidence. We also do not discuss the role of the presentation of facts on the formation of LRE features, but following Elsahar et al. (2018) and the strength of the relationship we find, we speculate this has minimal impact. Note that the BatchSearch tool we release tracks the exact position index of the searched terms, thus facilitating future work on questions about templates and presentation of information.",
+ "bbox": [
+ 169,
+ 435,
+ 826,
+ 630
+ ],
+ "page_idx": 15
+ },
+ {
+ "type": "text",
+ "text": "B EFFECT OF TRAINING ON INCORRECT EXAMPLES",
+ "text_level": 1,
+ "bbox": [
+ 171,
+ 650,
+ 622,
+ 666
+ ],
+ "page_idx": 15
+ },
+ {
+ "type": "text",
+ "text": "In Hernandez et al. (2024), examples are filtered to those that the LM answers correctly, assuming that an LRE will only exist once a model has attained the knowledge to answer the relation accurately (e.g., knowing many country capitals). We find that the choice of examples for fitting LREs is not entirely dependent on the model 'knowing' that relation perfectly (i.e., attaining high accuracy). This is convenient for our study, where we test early-checkpoint models, which do not necessarily have all of the information that they will have seen later in training. In Figure 5, we show faithfulness on relations where the LRE was fit with all, half, or zero correct examples. We omit data for which the model did not get enough incorrect examples. 
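A minimal sketch of the frequency-prediction regression discussed in Appendix A above, before returning to the example-choice comparison. This is illustrative only, not the released pipeline: the choice of `GradientBoostingRegressor`, the feature columns (e.g., causality, faithfulness, in-context accuracy), and the log-count target are all assumptions about the general recipe.

```python
# Hedged sketch: predict (log) pretraining term frequency from LRE metrics.
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor

def fit_frequency_regressor(features, counts):
    """features: (n_terms, n_features) array, e.g., columns for causality,
    faithfulness, and in-context accuracy (assumed feature set).
    counts: raw occurrence counts for the same terms."""
    y = np.log10(np.asarray(counts, dtype=float) + 1.0)  # predict order of magnitude
    return GradientBoostingRegressor().fit(features, y)

# Usage (hypothetical arrays):
# model = fit_frequency_regressor(X_train, train_counts)
# est_log10 = model.predict(X_heldout)  # approximate frequency ranges for new terms
```

Predicting the log of the count rather than the raw count is one simple way to target "approximate ranges" of frequency, as the surrounding text describes.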
Averages across relations for which we have enough data are shown in Figure 4, which shows no considerable difference based on the choice of LRE samples used for fitting.",
+ "bbox": [
+ 169,
+ 681,
+ 826,
+ 821
+ ],
+ "page_idx": 15
+ },
+ {
+ "type": "text",
+ "text": "C LRE HYPERPARAMETER TUNING",
+ "text_level": 1,
+ "bbox": [
+ 171,
+ 840,
+ 488,
+ 857
+ ],
+ "page_idx": 15
+ },
+ {
+ "type": "text",
+ "text": "There are three hyperparameters for fitting LREs: the layer at which to edit the subject, the beta term used to scale the LRE weight matrix, and the rank of the pseudoinverse matrix used to make edits for",
+ "bbox": [
+ 169,
+ 871,
+ 823,
+ 901
+ ],
+ "page_idx": 15
+ },
+ {
+ "type": "header",
+ "text": "Published as a conference paper at ICLR 2025",
+ "bbox": [
+ 171,
+ 32,
+ 478,
+ 47
+ ],
+ "page_idx": 15
+ },
+ {
+ "type": "page_footnote",
+ "text": "6 And thus mechanistic, in the narrow technical sense of the term (Saphra & Wiegreffe, 2024).",
+ "bbox": [
+ 189,
+ 909,
+ 746,
+ 924
+ ],
+ "page_idx": 15
+ },
+ {
+ "type": "page_number",
+ "text": "16",
+ "bbox": [
+ 490,
+ 948,
+ 509,
+ 960
+ ],
+ "page_idx": 15
+ },
+ {
+ "type": "image",
+ "img_path": "images/01813bff727a9fe1a067c13b336446f4f03f90c681bba96141df80054d9c6e2a.jpg",
+ "image_caption": [
+ "Figure 4: Average Causality and Faithfulness results across relations depending on whether the LRE was fit with correct or incorrect samples. We find no notable difference based on the choice of examples."
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 173,
+ 133,
+ 496,
+ 276
+ ],
+ "page_idx": 16
+ },
+ {
+ "type": "image",
+ "img_path": "images/e1bc4c029a94a41a2487a11963db8efb8a4bc436b13831cb494622be6967af91.jpg",
+ "image_caption": [],
+ "image_footnote": [],
+ "bbox": [
+ 496,
+ 133,
+ 821,
+ 275
+ ],
+ "page_idx": 16
+ },
+ {
+ "type": "image",
+ "img_path": "images/b1468d161541d0304c9b2f6e11599e08df3b2e7d0401e29a1958c7c2c8462627.jpg",
+ "image_caption": [
+ "Figure 5: Causality and Faithfulness results for each relation depending on whether the LRE was fit with correct or incorrect samples. Note that relations with only one bar do not have zeros in the other categories; rather, the model (OLMo-7B) did not get enough examples wrong for those variants to be fit."
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 173,
+ 393,
+ 823,
+ 816
+ ],
+ "page_idx": 16
+ },
+ {
+ "type": "header",
+ "text": "Published as a conference paper at ICLR 2025",
+ "bbox": [
+ 173,
+ 32,
+ 478,
+ 47
+ ],
+ "page_idx": 16
+ },
+ {
+ "type": "page_number",
+ "text": "17",
+ "bbox": [
+ 490,
+ 946,
+ 508,
+ 959
+ ],
+ "page_idx": 16
+ },
+ {
+ "type": "image",
+ "img_path": "images/ccf55fa257666450f7d7be8f3fb3642fd19ac04cc3fb787bce208255d416229f.jpg",
+ "image_caption": [
+ "Best Layer Beta vs. Faithfulness",
+ "Figure 6: OLMo 0424 7B per-layer faithfulness scores as a function of the choice of layer at which to fit the LRE. Note that we do not use these results to choose the layer for the LRE, instead preferring the results from the causality sweep."
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 202,
+ 133,
+ 816,
+ 500
+ ],
+ "page_idx": 17
+ },
+ {
+ "type": "text",
+ "text": "measuring causality. Beta is exclusive to measuring faithfulness and rank is exclusive to causality. We test the same ranges for each as in Hernandez et al. (2024): [0, 5] for beta and [0, full_rank] for rank, at varying intervals. 
Those intervals are every 2 from [0, 100], every 5 from [100, 200], every 25 from [200, 500], every 50 from [500, 1000], and every 250 from [1000, hidden_size]. We perform the hyperparameter sweeps across faithfulness and causality, but we choose the layer to edit based on the causality score. In cases where this is not the same layer as what faithfulness would decide, we use the layer causality chooses, as it would not make sense to train one LRE for each metric. We refer the reader to Hernandez et al. (2024) for more details on the interactions between hyperparameters and the choice of layer. The results of our sweeps on OLMo-7B are shown across layers in Figures 6 and 7 and across beta and rank choices in Figures 8 and 9.",
+ "bbox": [
+ 169,
+ 604,
+ 823,
+ 743
+ ],
+ "page_idx": 17
+ },
+ {
+ "type": "text",
+ "text": "D BATCH SEARCH COUNTS COMPARED TO WIMBD",
+ "text_level": 1,
+ "bbox": [
+ 171,
+ 765,
+ 627,
+ 780
+ ],
+ "page_idx": 17
+ },
+ {
+ "type": "text",
+ "text": "In Figure 10, we find that What's in My Big Data (Elazar et al., 2024) matches batch search co-occurrences very well; however, WIMBD tends to over-predict co-occurrences (slope less than 1), due to the sequence length being shorter than many documents, as discussed in the main paper.",
+ "bbox": [
+ 169,
+ 797,
+ 823,
+ 842
+ ],
+ "page_idx": 17
+ },
+ {
+ "type": "text",
+ "text": "E FEATURE CORRELATIONS AND IMPORTANCES",
+ "text_level": 1,
+ "bbox": [
+ 171,
+ 863,
+ 588,
+ 878
+ ],
+ "page_idx": 17
+ },
+ {
+ "type": "text",
+ "text": "Our feature importance test is shown in Figure 12. This permutation test was done on the held-out data to show which features contribute the most to generalization performance. We use PCA to",
+ "bbox": [
+ 169,
+ 895,
+ 823,
+ 924
+ ],
+ "page_idx": 17
+ },
+ {
+ "type": "header",
+ "text": "Published as a conference paper at ICLR 2025",
+ "bbox": [
+ 173,
+ 32,
+ 478,
+ 47
+ ],
+ "page_idx": 17
+ },
+ {
+ "type": "page_number",
+ "text": "18",
+ "bbox": [
+ 490,
+ 948,
+ 508,
+ 959
+ ],
+ "page_idx": 17
+ },
+ {
+ "type": "image",
+ "img_path": "images/7e45008f997fe87d85da17fac14afbe181e1bd855d46089d2000d6d279f17c0b.jpg",
+ "image_caption": [
+ "Layer vs. Causality",
+ "Figure 7: OLMo 0424 7B per-layer causality scores as a function of the choice of layer at which to fit the LRE."
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 204,
+ 314,
+ 818,
+ 676
+ ],
+ "page_idx": 18
+ },
+ {
+ "type": "header",
+ "text": "Published as a conference paper at ICLR 2025",
+ "bbox": [
+ 173,
+ 32,
+ 478,
+ 47
+ ],
+ "page_idx": 18
+ },
+ {
+ "type": "page_number",
+ "text": "19",
+ "bbox": [
+ 490,
+ 946,
+ 508,
+ 959
+ ],
+ "page_idx": 18
+ },
+ {
+ "type": "text",
+ "text": "Best Layer Beta vs. Faithfulness",
+ "bbox": [
+ 415,
+ 294,
+ 570,
+ 306
+ ],
+ "page_idx": 19
+ },
+ {
+ "type": "image",
+ "img_path": "images/834ebec1094b86ae7cfc91aa6d20e88a13cb123b46a91be46bb670f3f4ffa542.jpg",
+ "image_caption": [
+ "Figure 8: OLMo 0424 7B LRE Beta hyperparameter sweep at the highest-performing layer."
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 189,
+ 313,
+ 818,
+ 683
+ ],
+ "page_idx": 19
+ },
+ {
+ "type": "header",
+ "text": "Published as a conference paper at ICLR 2025",
+ "bbox": [
+ 173,
+ 32,
+ 478,
+ 47
+ ],
+ "page_idx": 19
+ },
+ {
+ "type": "page_number",
+ "text": "20",
+ "bbox": [
+ 488,
+ 946,
+ 509,
+ 959
+ ],
+ "page_idx": 19
+ },
+ {
+ "type": "image",
+ "img_path": "images/d48a6dd92df0c9e1200f947b2cd9997a53d627e698e84082717cc4c1719b0974.jpg",
+ "image_caption": [
+ "Best Layer Rank vs. 
Causality", + "Figure 9: OLMo 0424 7B LRE Rank hyperparameter sweep at highest performing layer." + ], + "image_footnote": [], + "bbox": [ + 205, + 320, + 815, + 683 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 173, + 32, + 478, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "WIMBD vs Batch Cooccurrence. slope=0.94, r=0.99", + "bbox": [ + 207, + 128, + 789, + 150 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/e0ce908e77a2e481e809b995710be04205bec67e9e7f5367e85b79837aa8831b.jpg", + "image_caption": [ + "WIMBD Cooccurrence" + ], + "image_footnote": [], + "bbox": [ + 210, + 185, + 715, + 388 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/c57e25852404ada77ca4a04964e77b1a84e1d9076ec8387246cef26b62d8f7fd.jpg", + "image_caption": [ + "Figure 10: Comparison between WIMBD and Batch Search subject-object co-occurrences" + ], + "image_footnote": [], + "bbox": [ + 171, + 529, + 491, + 770 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/033cc96dfe8534d7d86dd0369b088e2fbd4344a80840dfa4285ec2df9c7c3d0f.jpg", + "image_caption": [ + "Figure 11: Correlations between each feature in our regression analysis. Because of the high correlation between faithfulness metrics, we use a single dimensional PCA to attain one feature that captures $89\\%$ of the variance of both for the purposes of doing feature importance tests. Note that we zero out the diagonal (which has values of 1) for readability." + ], + "image_footnote": [], + "bbox": [ + 504, + 529, + 823, + 767 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "reduce the faithfulness features to one feature for the purposes of this test. Correlations are shown in Figure 11", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Permutation Importances", + "text_level": 1, + "bbox": [ + 323, + 98, + 661, + 122 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/685630e8cab5d7ffaecc4c03b74e95cadb0e2941465d30626d12961382cbfa32.jpg", + "image_caption": [ + "Figure 12: Hard causality is by far the most important feature for generalizing to new relations when predicting Object frequencies, causing a change in about $15\\%$ accuracy." + ], + "image_footnote": [], + "bbox": [ + 173, + 148, + 823, + 367 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "F RELATIONSHIP BETWEEN CAUSALITY AND ACCURACY", + "text_level": 1, + "bbox": [ + 171, + 434, + 668, + 450 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "In this section, we provide more detail on the relationship between the formation of linear representations and accuracy on in-context learning tasks. 
Although the two are highly correlated, we argue that accuracy and LRE formation are somewhat independent.",
+ "bbox": [
+ 169,
+ 465,
+ 823,
+ 508
+ ],
+ "page_idx": 22
+ },
+ {
+ "type": "text",
+ "text": "We show this relationship across training for OLMo-1B in Figure 13 and for OLMo-7B in Figure 14.",
+ "bbox": [
+ 171,
+ 513,
+ 761,
+ 531
+ ],
+ "page_idx": 22
+ },
+ {
+ "type": "text",
+ "text": "G EXTENDING TO COMMONSENSE RELATIONS",
+ "text_level": 1,
+ "bbox": [
+ 171,
+ 549,
+ 578,
+ 566
+ ],
+ "page_idx": 22
+ },
+ {
+ "type": "text",
+ "text": "Following Elsahar et al. (2018), we focus on factual relations because subject-object co-occurrences are shown to be a good proxy for mentions of the fact. For completeness, we consider 8 additional commonsense relations here. Results for OLMo-7B are shown in Figure 15. We show that frequency is correlated with causality score (.42) in these cases as well, but it is possible that subject-object frequencies do not accurately track occurrences of the relation being mentioned. For example, in the \"task person type\" relation, the co-occurrence count of the subject \"researching history\" and the object \"historian\" does not convincingly describe all instances where the historian concept is defined during pretraining. Co-occurrences are, however, perhaps more convincingly related to how a model learns that the outside of a coconut is brown (the fruit-outside-color relation). Therefore, we caution against treating these under the same lens as the factual relations. Nevertheless, we believe these results offer an interesting perspective on how a different relation family compares to factual relations.",
+ "bbox": [
+ 169,
+ 580,
+ 826,
+ 734
+ ],
+ "page_idx": 22
+ },
+ {
+ "type": "header",
+ "text": "Published as a conference paper at ICLR 2025",
+ "bbox": [
+ 173,
+ 32,
+ 478,
+ 47
+ ],
+ "page_idx": 22
+ },
+ {
+ "type": "page_number",
+ "text": "23",
+ "bbox": [
+ 488,
+ 946,
+ 508,
+ 959
+ ],
+ "page_idx": 22
+ },
+ {
+ "type": "image",
+ "img_path": "images/aa073b9b312d72e727c34d9effd4ff74d2bc67d5454ba1aa1fb8ca90ff01e807.jpg",
+ "image_caption": [
+ "Zero Shot, 5 Shot, Causality: OLMo 1B",
+ "Figure 13: Zero-shot and 5-shot accuracies against causality for each relation across training time in OLMo-1B."
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 173,
+ 304,
+ 823,
+ 696
+ ],
+ "page_idx": 23
+ },
+ {
+ "type": "header",
+ "text": "Published as a conference paper at ICLR 2025",
+ "bbox": [
+ 171,
+ 32,
+ 478,
+ 47
+ ],
+ "page_idx": 23
+ },
+ {
+ "type": "page_number",
+ "text": "24",
+ "bbox": [
+ 488,
+ 946,
+ 508,
+ 959
+ ],
+ "page_idx": 23
+ },
+ {
+ "type": "image",
+ "img_path": "images/ca992b76e0489ff59dc80b449abc20f4339b2b2e88a80a74be1ef2bbeaf07d28.jpg",
+ "image_caption": [
+ "Zero Shot, 5 Shot, Causality: OLMo 7B",
+ "Figure 14: Zero-shot and 5-shot accuracies against causality for each relation across training time in OLMo-7B."
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 171,
+ 160,
+ 823,
+ 551
+ ],
+ "page_idx": 24
+ },
+ {
+ "type": "image",
+ "img_path": "images/dd50d1b038103e2aab255d65d92dd375cfbbf37543fcd2ba126437888775be19.jpg",
+ "image_caption": [
+ "OLMo-7B 0424 Development of Commonsense LREs over Training Time",
+ "Figure 15: Commonsense relations compared to pretraining time in OLMo-7B." 
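The frequency-causality correlation reported in Appendix G above takes only a few lines to compute; whether the .42 is Pearson or a rank correlation is not stated here, so the choice below is illustrative, and the input arrays are hypothetical precomputed per-example statistics.

```python
# Hedged sketch: correlate (log) subject-object co-occurrence counts with
# per-example causality scores, as in the commonsense-relation analysis.
import numpy as np
from scipy.stats import pearsonr

def freq_causality_corr(cooccurrence_counts, causality_scores):
    log_freq = np.log10(np.asarray(cooccurrence_counts, dtype=float) + 1.0)
    r, p = pearsonr(log_freq, np.asarray(causality_scores, dtype=float))
    return r, p
```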
+ ], + "image_footnote": [], + "bbox": [ + 181, + 705, + 823, + 858 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at ICLR 2025", + "bbox": [ + 171, + 32, + 478, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 24 + } +] \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12459/9956eeb3-00c8-414d-bf51-c94c41a6c788_model.json b/data/2025/2504_12xxx/2504.12459/9956eeb3-00c8-414d-bf51-c94c41a6c788_model.json new file mode 100644 index 0000000000000000000000000000000000000000..a19d2d8bf0462c39b79cdf49d18d1cbc9572f7d6 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/9956eeb3-00c8-414d-bf51-c94c41a6c788_model.json @@ -0,0 +1,3011 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.825, + 0.149 + ], + "angle": 0, + "content": "ON LINEAR REPRESENTATIONS AND PRETRAINING DATA FREQUENCY IN LANGUAGE MODELS" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.172, + 0.741, + 0.189 + ], + "angle": 0, + "content": "Jack Merullo\\(^{\\diamond}\\) Noah A. Smith\\(^{\\text{♣}}\\) Sarah Wiegrefe\\(^{\\text{♥♣}}\\) Yanai Elazar\\(^{\\text{♥♣}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.201, + 0.697, + 0.217 + ], + "angle": 0, + "content": "\\(\\diamond\\) Brown University, \\(\\diamond\\) Allen Institute for AI (Ai2), \\(\\clubsuit\\)University of Washington" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.217, + 0.314, + 0.229 + ], + "angle": 0, + "content": "*Co-senior authors." + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.23, + 0.735, + 0.246 + ], + "angle": 0, + "content": "jack_merullo@brown.edu, {noah, sarahw, yanaie}@allenai.org" + }, + { + "type": "title", + "bbox": [ + 0.451, + 0.281, + 0.547, + 0.297 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.313, + 0.768, + 0.675 + ], + "angle": 0, + "content": "Pretraining data has a direct impact on the behaviors and quality of language models (LMs), but we only understand the most basic principles of this relationship. While most work focuses on pretraining data's effect on downstream task behavior, we investigate its relationship to LM representations. Previous work has discovered that, in language models, some concepts are encoded 'linearly' in the representations, but what factors cause these representations to form (or not)? We study the connection between pretraining data frequency and models' linear representations of factual relations (e.g., mapping France to Paris in a capital prediction task). We find evidence that the formation of linear representations is strongly connected to pretraining term frequencies; specifically for subject-relation-object fact triplets, both subject-object co-occurrence frequency and in-context learning accuracy for the relation are highly correlated with linear representations. This is the case across all phases of pretraining, i.e., it is not affected by the model's underlying capability. In OLMo-7B and GPT-J (6B), we discover that a linear representation consistently (but not exclusively) forms when the subjects and objects within a relation co-occur at least 1k and 2k times, respectively, regardless of when these occurrences happen during pretraining (and around 4k times for OLMo-1B). 
Finally, we train a regression model on measurements of linear representation quality in fully-trained LMs that can predict how often a term was seen in pretraining. Our model achieves low error even on inputs from a different model with a different pretraining dataset, providing a new method for estimating properties of the otherwise-unknown training data of closed-data models. We conclude that the strength of linear representations in LMs contains signal about the models' pretraining corpora that may provide new avenues for controlling and improving model behavior: particularly, manipulating the models' training data to meet specific frequency thresholds. We release our code to support future work.\\(^{1}\\)" + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.702, + 0.338, + 0.717 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.733, + 0.827, + 0.887 + ], + "angle": 0, + "content": "Understanding how the content of pretraining data affects language model (LM) behaviors and performance is an active area of research (Ma et al., 2024; Xie et al., 2023; Aryabumi et al., 2025; Longpre et al., 2024; Wang et al., 2025; Seshadri et al., 2024; Razeghi et al., 2023; Wang et al., 2024). For instance, it has been shown that for specific tasks, models perform better on instances containing higher frequency terms than lower frequency ones (Razeghi et al., 2022; Mallen et al., 2023; McCoy et al., 2024). However, the ways in which frequency affects the internal representations of LMs to cause this difference in performance remain unclear. We connect dataset statistics to recent work in interpretability, which focuses on the emergence of simple linear representations of factual relations in LMs Hernandez et al. (2024); Chanin et al. (2024). Our findings demonstrate a strong correlation between these linear representations and the frequency of terms in the pretraining corpus." + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.898, + 0.825, + 0.925 + ], + "angle": 0, + "content": "1Code is available at https://github.com/allenai/freq, and for efficient batch search at https://github.com/allenai/batchsearch." + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.266, + 0.061, + 0.709 + ], + "angle": 270, + "content": "arXiv:2504.12459v1 [cs.CL] 16 Apr 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.231 + ], + "angle": 0, + "content": "Linear representations in LMs have become central to interpretability research in recent years (Ravfogel et al., 2020; Elazar et al., 2021; Elhage et al., 2021; Slobodkin et al., 2023; Olah et al., 2020; Park et al., 2024; Jiang et al., 2024; Black et al., 2022; Chanin et al., 2024). Linear representations are essentially linear approximations (linear transforms, directions in space) that are simple to understand, and strongly approximate the complex non-linear transformations that networks are implementing. These representations are crucial because they allow us to localize much of the behavior and capabilities of LMs to specific directions in activation space. 
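A minimal sketch of what such a direction-based intervention can look like in code follows; this is illustrative PyTorch, not code from any work cited here, and it assumes the steering `direction` has already been found and that the model's transformer block is exposed as a module that can be hooked.

```python
# Hedged sketch: add a scaled direction to a layer's residual-stream output.
import torch

def add_steering_hook(layer: torch.nn.Module, direction: torch.Tensor, alpha: float = 5.0):
    """Register a forward hook on `layer` that adds alpha * direction
    (unit-normalized) to its hidden-state output."""
    direction = direction / direction.norm()

    def hook(module, inputs, output):
        hidden = output[0] if isinstance(output, tuple) else output
        hidden = hidden + alpha * direction.to(hidden.dtype)
        return (hidden,) + tuple(output[1:]) if isinstance(output, tuple) else hidden

    return layer.register_forward_hook(hook)  # call .remove() on the handle to undo
```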
This allows for simple interventions to control model behaviors, i.e., steering (Todd et al., 2024; Subramani et al., 2022; Hendel et al., 2023; Rimsky et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.236, + 0.828, + 0.364 + ], + "angle": 0, + "content": "Recent work by Hernandez et al. (2024) and Chanin et al. (2024) highlight how the linearity of different types of relations varies greatly depending on the specific relationships being depicted. For example, over \\(80\\%\\) of entities in the \"country-largest-city\" relation, but less than \\(30\\%\\) of entities in the \"star-in-constellation\" relation can be approximated this way (Hernandez et al., 2024). Such findings complicate the understanding of the Linear Representation Hypothesis, which proposes that LMs will represent features linearly (Park et al., 2024) without providing when/why these form. While Jiang et al. (2024) provide both theoretical and empirical evidence that the training objectives of LMs implicitly encourage linear representations, it remains unclear why some features are represented this way while others are not. This open question is a central focus of our investigation." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.369, + 0.828, + 0.565 + ], + "angle": 0, + "content": "Whether linear representations for \"common\" concepts are more prevalent in models or simply easier to identify (using current methods) than those for less common concepts remains unclear. We hypothesize that factual relations exhibiting linear representations are correlated with higher mention frequencies in the pretraining data (as has been shown with static embeddings, see Ethayarajh et al., 2019), which we confirm in Section 4. Our results also indicate that this can occur at any point in pretraining, as long as a certain average frequency is reached across subject-object pairs in a relation. In order to count the appearance of terms in data corpora throughout training, we develop an efficient tool for counting tokens in tokenized batches of text, which we release to support future work in this area. We also explore whether the presence of linear representations can provide insights into relation term frequency. In Section 5, we fit a regression model to predict the frequency of individual terms (such as \"The Beatles\") in the pretraining data, based on metrics measuring the presence of a linear representation for some relation. For example, how well a linear transformation approximates the internal computation of the \"lead-singer-of\" relation mapping \"John Lennon\" to \"The Beatles\" can tell us about the frequency of those terms in the pretraining corpus." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.57, + 0.828, + 0.643 + ], + "angle": 0, + "content": "Our findings indicate that the predictive signal, although approximate, is much stronger than that encoded in log probabilities and task accuracies alone, allowing us to estimate the frequencies of held-out relations and terms within approximate ranges. Importantly, this regression model generalizes beyond the specific LM it was trained on without additional supervision. This provides a valuable foundation for analyzing the pretraining corpora of closed-data models with open weights." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.647, + 0.455, + 0.662 + ], + "angle": 0, + "content": "To summarize, in this paper we show that:" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.685, + 0.825, + 0.712 + ], + "angle": 0, + "content": "1. 
The development of linear representations for factual recall relations in LMs is related to frequency as well as model size." + }, + { + "type": "text", + "bbox": [ + 0.209, + 0.739, + 0.825, + 0.782 + ], + "angle": 0, + "content": "2. Linear representations form at predictable frequency thresholds during training, regardless of when this frequency threshold is met for the nouns in the relation. The formation of these representations also correlates strongly with recall accuracy." + }, + { + "type": "text", + "bbox": [ + 0.209, + 0.808, + 0.825, + 0.851 + ], + "angle": 0, + "content": "3. Measuring the extent to which a relation is represented linearly in a model allows us to predict the approximate frequencies of individual terms in the pretraining corpus of that model, even when we do not have access to the model's training data." + }, + { + "type": "text", + "bbox": [ + 0.209, + 0.877, + 0.825, + 0.906 + ], + "angle": 0, + "content": "4. We release a tool for accurately and efficiently searching through tokenized text to support future research on training data." + }, + { + "type": "list", + "bbox": [ + 0.209, + 0.685, + 0.825, + 0.906 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.256, + 0.103, + 0.731, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.349, + 0.828, + 0.435 + ], + "angle": 0, + "content": "Figure 1: Overview of this work. Given a dataset of subject-relation-object factual relation triplets, we count subject-object co-occurrences throughout pretraining batches. We then measure how well the corresponding relations are represented within an LM across pretraining steps, using the Linear Relational Embeddings (LRE) method from Hernandez et al. (2024). We establish a strong relationship between average co-occurrence frequency and a model's tendency to form linear representations for relations. From this, we show that we can predict frequencies in the pretraining corpus" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.459, + 0.33, + 0.475 + ], + "angle": 0, + "content": "2 BACKGROUND" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.492, + 0.409, + 0.506 + ], + "angle": 0, + "content": "2.1 LINEAR REPRESENTATIONS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.518, + 0.828, + 0.783 + ], + "angle": 0, + "content": "Vector space models have a long history in language processing, where geometric properties of these spaces were used to encode semantic information (Salton et al., 1975; Paccanaro & Hinton, 2001). When and why linear structure emerges without explicit bias has been of considerable interest since the era of static word embeddings. Work on skipgram models (Mikolov et al., 2013a) found that vector space models of language learn regularities which allow performing vector arithmetic between word embeddings to calculate semantic relationships (e.g., France - Paris + Spain = Madrid) (Mikolov et al., 2013b; Pennington et al., 2014). This property was subject to much debate, as it was not clear why word analogies would appear for some relations and not others (Köper et al., 2015; Karpinska et al., 2018; Gladkova et al., 2016). 
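The vector-arithmetic regularities debated above are easy to probe directly; the sketch below (not the paper's released code) runs the classic analogy query over a hypothetical dictionary `emb` mapping words to static vectors, standing in for any pretrained embedding table.

```python
# Hedged sketch: "a is to b as c is to ?" via nearest-neighbor vector arithmetic.
import numpy as np

def analogy(emb: dict, a: str, b: str, c: str) -> str:
    query = emb[b] - emb[a] + emb[c]
    query = query / np.linalg.norm(query)
    best_word, best_sim = None, -np.inf
    for word, vec in emb.items():
        if word in (a, b, c):  # exclude the query terms themselves
            continue
        sim = float(query @ vec) / float(np.linalg.norm(vec))
        if sim > best_sim:
            best_word, best_sim = word, sim
    return best_word

# e.g., analogy(emb, "France", "Paris", "Spain") ideally returns "Madrid"
```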
Follow-up work showed that linguistic regularities form in static embeddings for relations under specific dataset frequency constraints for relevant terms (Ethayarajh et al., 2019), but it does not clearly relate to how modern LMs learn. More recently, there has been renewed interest in the presence of similar linear structure in models with contextual embeddings like transformer language models (Park et al., 2024; Jiang et al., 2024; Merullo et al., 2024). As a result, there are many ways to find and test for linear representations in modern LMs, though the relationship to pretraining data was not addressed (Huben et al., 2024; Gao et al., 2025; Templeton et al., 2024; Rimsky et al., 2024; Todd et al., 2024; Hendel et al., 2023; Hernandez et al., 2024; Chanin et al., 2024). Many of these share similarities in how they compute and test for linear representations. We focus on a particular class of linear representations called Linear Relational Embeddings (LREs) (Paccanaro & Hinton, 2001)."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.171,
+ 0.799,
+ 0.828,
+ 0.926
+ ],
+ "angle": 0,
+ "content": "Linear Relational Embeddings (LREs) Hernandez et al. (2024) use a particular class of linear representation called a Linear Relational Embedding (Paccanaro & Hinton, 2001) to approximate the computation performed by a model to predict the objects that complete common subject-relation-object triplets as an affine transformation. This transform is calculated from a hidden state \( \mathbf{s} \), the subject token representation at some middle layer of the model, to \( \mathbf{o} \), the hidden state at the last token position and layer of the model (i.e., the final hidden state that decodes a token in an autoregressive transformer) within a natural language description of the relation. For example, given the input sequence \"Miles Davis (subject) plays the (relation)\", the goal is to approximate the computation of the object \"trumpet\", assuming the model predicts the object cor-"
+ },
+ {
+ "type": "page_number",
+ "bbox": [
+ 0.494,
+ 0.949,
+ 0.506,
+ 0.96
+ ],
+ "angle": 0,
+ "content": "3"
+ }
+ ],
+ [
+ {
+ "type": "header",
+ "bbox": [
+ 0.173,
+ 0.033,
+ 0.48,
+ 0.049
+ ],
+ "angle": 0,
+ "content": "Published as a conference paper at ICLR 2025"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.17,
+ 0.104,
+ 0.827,
+ 0.218
+ ],
+ "angle": 0,
+ "content": "rectly. It was found that this transformation holds for nearly every subject and object in the relation set (such as \"Cat Stevens plays the guitar\") for some relations. This is surprising because, despite the nonlinearities within the many layers and token positions separating s and o, a simple structure within the representation space well approximates the model's prediction process for a number of factual relations. In this work we study LREs under the same definition and experimental setup, because it allows us to predefine the concepts we want to search for (e.g., factual relations), as well as use a handful of representations to relate thousands of terms in the dataset by learning linear representations on a per-relation level."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.171,
+ 0.222,
+ 0.828,
+ 0.31
+ ],
+ "angle": 0,
+ "content": "Hernandez et al. calculate LREs to approximate an LM's computation as a first-order Taylor Series approximation. 
Let \( F(\mathbf{s}, c) = \mathbf{o} \) be the forward pass through a model that produces object representation \( \mathbf{o} \) given subject representation \( \mathbf{s} \) and a few-shot context \( c \); this computation is approximated as \( F(\mathbf{s}, c) \approx W\mathbf{s} + b = F(\mathbf{s}_i, c) + W(\mathbf{s} - \mathbf{s}_i) \) where we approximate the relation about a specific subject \( \mathbf{s}_i \). Hernandez et al. propose to compute \( W \) and \( b \) using the average of \( n \) examples from the relation (\( n = 8 \) here) with \( \frac{\partial F}{\partial\mathbf{s}} \) representing the Jacobian Matrix of \( F \):"
+ },
+ {
+ "type": "equation",
+ "bbox": [
+ 0.267,
+ 0.317,
+ 0.826,
+ 0.361
+ ],
+ "angle": 0,
+ "content": "\\[\nW = \\mathbb{E}_{\\mathbf{s}_i, c_i}\\left[ \\left. \\frac{\\partial F}{\\partial \\mathbf{s}} \\right|_{(\\mathbf{s}_i, c_i)} \\right] \\quad \\text{and} \\quad b = \\mathbb{E}_{\\mathbf{s}_i, c_i}\\left[ \\left. F(\\mathbf{s}, c) - \\frac{\\partial F}{\\partial \\mathbf{s}}\\,\\mathbf{s} \\right|_{(\\mathbf{s}_i, c_i)} \\right] \\tag{1}\n\\]"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.171,
+ 0.375,
+ 0.825,
+ 0.445
+ ],
+ "angle": 0,
+ "content": "In practice, LREs are estimated using hidden states from LMs during the processing of the test example in a few-shot setup. For a relation like \"instrument-played-by-musician\", the model may see four examples (in the form \"[X] plays the [Y]\") and on the fifth example, when predicting e.g., \"trumpet\" from \"Miles Davis plays the\", the subject representation s and object representation o are extracted."
+ },
+ {
+ "type": "title",
+ "bbox": [
+ 0.172,
+ 0.464,
+ 0.522,
+ 0.479
+ ],
+ "angle": 0,
+ "content": "2.2 INFERRING TRAINING DATA FROM MODELS"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.17,
+ 0.491,
+ 0.827,
+ 0.59
+ ],
+ "angle": 0,
+ "content": "There has been significant interest recently in understanding the extent to which it is possible to infer the training data of a fully trained neural network, including LMs, predominantly by performing membership inference attacks (Shokri et al., 2017; Carlini et al., 2022), judging memorization of text (Carlini et al., 2023; Oren et al., 2024; Shi et al., 2024), or inferring the distribution of data sources (Hayase et al., 2024; Ateniese et al., 2015; Suri & Evans, 2022). Our work is related in that we find hints of the pretraining data distribution in the model itself, but focus on how linear structure in the representations relates to training data statistics."
+ },
+ {
+ "type": "title",
+ "bbox": [
+ 0.172,
+ 0.612,
+ 0.294,
+ 0.627
+ ],
+ "angle": 0,
+ "content": "3 METHODS"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.17,
+ 0.645,
+ 0.827,
+ 0.73
+ ],
+ "angle": 0,
+ "content": "Our analysis is twofold: counts of terms in the pretraining corpus of LMs, and measurements of how well factual relations are approximated by affine transformations. We use the OLMo model v1.7 (0424 7B and 0724 1B) (Groeneveld et al., 2024) and GPT-J (6B) (Wang & Komatsuzaki, 2021) and their corresponding datasets: Dolma (Soldaini et al., 2024) and the Pile (Gao et al., 2020), respectively. To understand how these features form over training time, we test eight model checkpoints throughout training in the OLMo family of models (Groeneveld et al., 2024)." 
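To make Equation 1 concrete, here is a minimal PyTorch sketch of LRE estimation. It is not the authors' released implementation: `forward_from_s`, a closure that re-runs the rest of the forward pass from an injected subject hidden state with the few-shot context held fixed, is a hypothetical helper the reader would implement with model hooks.

```python
# Hedged sketch of Equation 1: average the Jacobian and bias over n examples.
import torch

def fit_lre(forward_from_s, subject_states):
    """forward_from_s: callable mapping a subject state s -> object state o.
    subject_states: list of n subject representations s_i (e.g., n = 8)."""
    Ws, bs = [], []
    for s_i in subject_states:
        J = torch.autograd.functional.jacobian(forward_from_s, s_i)  # dF/ds at (s_i, c_i)
        o_i = forward_from_s(s_i)          # F(s_i, c_i)
        Ws.append(J)
        bs.append(o_i - J @ s_i)           # F(s_i, c_i) - (dF/ds) s_i
    W = torch.stack(Ws).mean(dim=0)        # E[dF/ds]
    b = torch.stack(bs).mean(dim=0)        # E[F(s, c) - (dF/ds) s]
    return W, b

# Prediction for a new subject state s: o_hat = beta * (W @ s) + b,
# where beta is the per-relation scale discussed in Section 3.1.
```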
+ },
+ {
+ "type": "title",
+ "bbox": [
+ 0.172,
+ 0.748,
+ 0.582,
+ 0.763
+ ],
+ "angle": 0,
+ "content": "3.1 LINEAR RELATIONAL EMBEDDINGS (LRES) IN LMS"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.17,
+ 0.775,
+ 0.825,
+ 0.845
+ ],
+ "angle": 0,
+ "content": "We use a subset of the RELATIONS dataset of Hernandez et al. (2024), focusing on the 25 factual relations of the dataset, such as capital-city and person-mother (complete list in Appendix B). Across these relations, there are 10,488 unique subjects and objects. Following Hernandez et al. (2024), we fit an LRE for each relation on 8 examples from that relation, each with a 5-shot prompt. We use the approach from this work as described in Section 2.1."
+ },
+ {
+ "type": "page_footnote",
+ "bbox": [
+ 0.17,
+ 0.86,
+ 0.827,
+ 0.926
+ ],
+ "angle": 0,
+ "content": "For the analysis, we drop \"landmark-on-continent\" because \(74\%\) of the answers are Antarctica, making it potentially confounding for extracting a representation for the underlying relation. Factual relations are much easier to get accurate counts for, so we leave non-factual relations for future work (e.g., although LMs associate the \"pilot\" occupation with men, this relation does not map to the word \"man\" the way \"France\" maps to \"Paris\"; see §3.2)."
+ },
+ {
+ "type": "page_number",
+ "bbox": [
+ 0.494,
+ 0.949,
+ 0.506,
+ 0.96
+ ],
+ "angle": 0,
+ "content": "4"
+ }
+ ],
+ [
+ {
+ "type": "header",
+ "bbox": [
+ 0.173,
+ 0.033,
+ 0.48,
+ 0.049
+ ],
+ "angle": 0,
+ "content": "Published as a conference paper at ICLR 2025"
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.171,
+ 0.104,
+ 0.827,
+ 0.258
+ ],
+ "angle": 0,
+ "content": "Fitting LREs Hernandez et al. (2024) find that Equation 1 underestimates the optimal slope of the linear transformation, so they scale each relation's \( W \) by a scalar hyperparameter \( \beta \). Unlike the original work, which finds one \( \beta \) per model, we use one \( \beta \) per relation, as this avoids disadvantaging specific relations. Another difference in our calculation of LREs is that we do not impose the constraint that the model has to predict the answer correctly to be used as one of the 8 examples used to approximate the Jacobian Matrix. Interestingly, using examples that models predict incorrectly to fit Equation 1 works as well as using only correct examples. We opt to use this variant as it allows us to compare different checkpoints and models (§4) with linear transformations trained on the same 8 examples, despite the fact that the models make different predictions on these instances. We explore the effect of example choice in Appendix B and find that it does not make a significant difference. We also explore the choice of layer in Appendix C."
+ },
+ {
+ "type": "text",
+ "bbox": [
+ 0.171,
+ 0.287,
+ 0.828,
+ 0.429
+ ],
+ "angle": 0,
+ "content": "Metrics To evaluate the quality of LREs, Hernandez et al. (2024) introduce two metrics that measure the quality of the learned transformations. Faithfulness measures whether the transformation learned by the LRE produces the same object token prediction as the original LM. Causality measures the proportion of the time a prediction of an object can be changed to the output of a different example from the relation (e.g., editing the Miles Davis subject representation so that the LM predicts he plays the guitar, instead of the trumpet). For specifics on implementation, we refer the reader to Hernandez et al. (2024). 
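As a rough illustration of the faithfulness computation just described (hedged: `unembed`, applying the LM's final normalization and unembedding to a hidden state, and the precollected LM predictions are assumed helpers, not the released API):

```python
# Hedged sketch: fraction of examples where the LRE's decoded token matches
# the LM's own next-token prediction for the same prompt.
import torch

def faithfulness(W, b, subjects, lm_preds, unembed):
    """subjects: iterable of subject hidden states s; lm_preds: the LM's own
    argmax next-token ids; unembed: hidden state -> vocab logits."""
    hits = 0
    for s, lm_tok in zip(subjects, lm_preds):
        o_hat = W @ s + b                        # LRE-predicted object state
        lre_tok = unembed(o_hat).argmax().item() # decode with the LM head
        hits += int(lre_tok == lm_tok)
    return hits / len(subjects)
```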
We consider an LRE to be high 'quality' when it scores highly on these metrics, as this measures when an LRE works across subject-object pairs within the relation. In general, we prefer to use causality in our analysis, as faithfulness can be high when LMs predict the same token very often (like in early checkpoints)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.457, + 0.577, + 0.473 + ], + "angle": 0, + "content": "3.2 COUNTING FREQUENCIES THROUGHOUT TRAINING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.489, + 0.825, + 0.575 + ], + "angle": 0, + "content": "A key question we explore is how term frequencies affect the formation of linear representations. We hypothesize that more commonly occurring relations will lead to higher quality LREs for those relations. Following Elsahar et al. (2018); Elazar et al. (2022), we count an occurrence of a relation when a subject and object co-occur together. While term co-occurrence is used as a proxy for the frequency of the entire triplet mentioned in text, Elsahar et al. (2018) show that this approximation is quite accurate. We now discuss how to compute these co-occurrence counts." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.603, + 0.827, + 0.743 + ], + "angle": 0, + "content": "What's in My Big Data? (WIMBD) Elazar et al. (2024) index many popular pretraining datasets, including Dolma (Soldaini et al., 2024) and the Pile (Gao et al., 2020), and provide search tools that allow for counting individual terms and co-occurrences within documents. However, this only gives us counts for the full dataset. Since we are interested in counting term frequencies throughout pretraining, we count these within training batches of OLMo instead. When per-batch counts are not available, WIMBD offers a good approximation for final checkpoints, which is what we do in the case of GPT-J. We compare WIMBD co-occurrence counts to the Batch Search method (described below) for the final checkpoint of OLMo in Appendix D, and find that the counts are extremely close: The slope of the best fit line for BatchCount against WIMBDCount is .94, because co-occurrence counts are overestimated when considering the whole document." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.772, + 0.828, + 0.926 + ], + "angle": 0, + "content": "Batch Search Data counting tools cannot typically provide accurate counts for model checkpoints at arbitrary training steps. Thus, we design a tool to efficiently count exact co-occurrences within sequences of tokenized batches. This also gives us the advantage of counting in a way that is highly accurate to how LMs are trained; since LMs are trained on batches of fixed lengths which often split documents into multiple sequences, miscounts may occur unless using tokenized sequences. Using this method, we note every time one of our 10k terms appears throughout a dataset used to pretrain an LM. We count a co-occurrence as any time two terms appear in the same sequence within a batch (a (batch-size, sequence-length) array). We search 10k terms in the approximately 2T tokens of Dolma (Soldaini et al., 2024) this way. Using our implementation, we are able to complete this on 900 CPUs in about a day. To support future work, we release our code as Cython bindings that integrate out of the box with existing libraries." 
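As a rough sketch of the Batch Search logic described above (the released tool is implemented in Cython; this naive version only illustrates the counting), assuming batches arrive as integer arrays of shape (batch_size, sequence_length) and terms are supplied as tokenized id sequences:

```python
import numpy as np
from itertools import combinations

def count_cooccurrences(batch, term_ids):
    """Count within-sequence co-occurrences of tokenized terms.

    batch: int array of shape (batch_size, sequence_length), exactly as the
    LM consumes it, so document splits across sequences are respected.
    term_ids: dict mapping term name -> 1-D array of token ids.
    Returns a dict mapping (term_a, term_b) -> number of sequences in which
    both terms appear.
    """
    def occurs(seq, ids):
        # Naive subsequence scan over the tokenized sequence.
        n = len(ids)
        return any((seq[i:i + n] == ids).all() for i in range(len(seq) - n + 1))

    counts = {}
    for seq in batch:
        present = [t for t, ids in term_ids.items()
                   if occurs(seq, np.asarray(ids))]
        for pair in combinations(sorted(present), 2):
            counts[pair] = counts.get(pair, 0) + 1
    return counts
```

Summing these per-batch dictionaries over training order yields checkpoint-aligned counts; at the scale of the roughly 2T tokens of Dolma, the paper's optimized Cython implementation and about 900 CPUs were needed to finish in a day.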
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image_caption", + "bbox": [ + 0.177, + 0.103, + 0.511, + 0.117 + ], + "angle": 0, + "content": "OLMo-7B 0424 Development of LREs over Training Time" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.127, + 0.484, + 0.226 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.179, + 0.231, + 0.51, + 0.244 + ], + "angle": 0, + "content": "OLMo-1B 0724 Development of LREs over Training Time" + }, + { + "type": "image_footnote", + "bbox": [ + 0.541, + 0.111, + 0.707, + 0.125 + ], + "angle": 0, + "content": "41B Tokens (10k steps)" + }, + { + "type": "image_footnote", + "bbox": [ + 0.541, + 0.125, + 0.641, + 0.138 + ], + "angle": 0, + "content": "Final Model" + }, + { + "type": "list", + "bbox": [ + 0.541, + 0.111, + 0.707, + 0.138 + ], + "angle": 0, + "content": null + }, + { + "type": "table", + "bbox": [ + 0.533, + 0.143, + 0.821, + 0.216 + ], + "angle": 0, + "content": "
Model | Co-Occurrence Threshold (Mean Causality > 0.9)
GPT-J (6B) | 1,097
OLMo-7B | 1,998
OLMo-1B | 4,447
" + }, + { + "type": "image_caption", + "bbox": [ + 0.537, + 0.231, + 0.812, + 0.244 + ], + "angle": 0, + "content": "GPT-J Development of LREs over Training Time" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.255, + 0.485, + 0.355 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.507, + 0.256, + 0.811, + 0.355 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.371, + 0.825, + 0.468 + ], + "angle": 0, + "content": "Figure 2: We find that LREs have consistently high causality scores across relations after some average frequency threshold is reached (table, top right). In OLMo models, red dots show the model's LRE performance at 41B tokens, and blue dots show the final checkpoint performance (550k steps in 7B). Gray dots show intermediate checkpoints. We highlight Even at very early training steps, if the average subject-object cooc. count is high enough, the models are very likely to already have robust LREs formed in the representation space. Symbols represent different relations. Highlighted relations are shown in darker lines." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.476, + 0.779, + 0.51 + ], + "angle": 0, + "content": "4 FREQUENCY OF SUBJECT-OBJECT CO-OCCURRENCES ALIGNS WITH EMERGENCE OF LINEAR REPRESENTATIONS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.527, + 0.825, + 0.598 + ], + "angle": 0, + "content": "In this section, we explore when LREs begin to appear at training time and how these are related to pretraining term frequencies. Our main findings are that (1) average co-occurrence frequency within a relation strongly correlates with whether an LRE will form; (2) the frequency effect is independent of the pretraining stage; if the average subject-object co-occurrence for a relation surpasses some threshold, it is very likely to have a high-quality LRE, even for early pretraining steps." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.616, + 0.264, + 0.63 + ], + "angle": 0, + "content": "4.1 SETUP" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.643, + 0.827, + 0.797 + ], + "angle": 0, + "content": "Using the factual recall relations from the Hernandez et al. (2024) dataset, we use the Batch Search method (§3.2) to count subject and object co-occurrences within sequences in Dolma (Soldaini et al., 2024) used to train the OLMo-1B (v. 0724) and 7B (v. 0424) models (Groeneveld et al., 2024). The OLMo family of models provides tools for accurately recreating the batches from Dolma, which allow us to reconstruct the data the way the model was trained. We also use GPT-J (Wang & Komatsuzaki, 2021) and the Pile (Gao et al., 2020) as its training data, but since we do not have access to accurate batches used to train it, we use WIMBD (Elazar et al., 2024) to count subject-object counts in the entire data. We fit LREs on each relation and model separately. Hyperparameter sweeps are in Appendix C. OLMo also releases intermediate checkpoints, which we use to track development over pretraining time. We use checkpoints that have seen {41B, 104B, 209B, 419B, 628B, 838B, 1T, and 2T} tokens.3 We use the Pearson coefficient for measuring correlation." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.815, + 0.279, + 0.829 + ], + "angle": 0, + "content": "4.2 RESULTS" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.842, + 0.825, + 0.871 + ], + "angle": 0, + "content": "Our results are summarized in Figure 2. We report training tokens because the step count differs between 7B and 1B. 
Co-occurrence frequencies highly correlate with causality \\((r = 0.82)\\). This" + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.884, + 0.803, + 0.899 + ], + "angle": 0, + "content": "3In OLMo-7B 0424, this corresponds to \\(10\\mathrm{k}\\) 25k, 50k, 100k, 150k, 200k, 250k, 409k pretraining steps" + }, + { + "type": "page_footnote", + "bbox": [ + 0.174, + 0.899, + 0.825, + 0.925 + ], + "angle": 0, + "content": "These are: 'country largest city', 'country currency', 'company hq', 'company CEO', and 'star constellation name' in order from best to worst performing final checkpoints." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.884, + 0.825, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.135 + ], + "angle": 0, + "content": "is notably higher than the correlations with subject frequencies: \\( r = 0.66 \\), and object frequencies: \\( r = 0.59 \\) for both OLMo-7B and OLMo-1B, respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.827, + 0.224 + ], + "angle": 0, + "content": "We consider a causality score above 0.9 to be nearly perfectly linear. The table in Figure 2 shows the co-occurrence counts above which the average causality is above 0.9 and is shown by dashed black lines on the scatterplots. Regardless of pretraining step, models that surpass this threshold have very high causality scores. Although we cannot draw conclusions from only three models, it is possible that scale also affects this threshold: OLMo-7B and GPT-J (6B params) require far less exposure than OLMo-1B." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.24, + 0.427, + 0.254 + ], + "angle": 0, + "content": "4.3 RELATIONSHIP TO ACCURACY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.265, + 0.827, + 0.476 + ], + "angle": 0, + "content": "Increased frequency (or a proxy for it) was shown to lead to better factual recall in LMs (Chang et al., 2024; Mallen et al., 2023). However, it remains unknown whether high accuracy entails the existence of a linear relationship. Such a finding would inform when we expect an LM to achieve high accuracy on a task. We find that the correlation between causality and subject-object frequency is higher than with 5-shot accuracy (0.82 v.s. 0.74 in OLMo-7B), though both are clearly high. In addition, there are a few examples of high accuracy relations that do not form single consistent LREs. These relations are typically low frequency, such as star constellation name, which has \\(84\\%\\) 5-shot accuracy but only \\(44\\%\\) causality (OLMo-7B), with subjects and objects only co-occurring about 21 times on average across the full dataset. In general, few-shot accuracy closely tracks causality, consistent with arguments that in-context learning allows models to identify linear mappings between input-output pairs (Hendel et al., 2023; Garg et al., 2022). We find that causality increases first in some cases, like \"food-from-country\" having a causality of \\(65\\%\\) but a 5-shot accuracy of only \\(42\\%\\). This gap is consistently closed through training. In the final model, causality and 5-shot accuracy are within \\(11\\%\\) on average. 
We report the relationship among causality, zero-shot accuracy, and few-shot accuracy for every relation for OLMo models across training in Appendix F." + }, + { + "type": "text", + "bbox": [ 0.171, 0.481, 0.826, 0.539 ], + "angle": 0, + "content": "A fundamental question in the interpretability community is under what circumstances linear structures form. While previous work has shown that the training objective encourages this type of representation (Jiang et al., 2024), our results suggest that the reason why some concepts form a linear representation while others do not is strongly related to the pretraining frequency." + }, + { + "type": "title", + "bbox": [ 0.172, 0.556, 0.765, 0.59 ], + "angle": 0, + "content": "5 LINEAR REPRESENTATIONS HELP PREDICT PRETRAINING CORPUS FREQUENCIES" + }, + { + "type": "text", + "bbox": [ 0.171, 0.604, 0.827, 0.744 ], + "angle": 0, + "content": "In this section, we aim to understand this relationship further by exploring what we can learn about pretraining term frequency from the linearity of LM representations. We target the challenging problem of predicting how often a term, or co-occurrence of terms, appears in an LM's training data from the representations alone. Such a prediction model can be useful, if it generalizes, when applied to other models whose weights are open but whose data is closed. For instance, such a predictive model could tell us whether a model was trained on specific domains (e.g., Java code) by measuring the presence of relevant LREs. First, we show that LRE features encode information about frequency that is not present in probabilities alone. Then, we show how a regression fit on one model generalizes to the features extracted from another without any information about the new model's counts." + }, + { + "type": "title", + "bbox": [ 0.172, 0.759, 0.377, 0.773 ], + "angle": 0, + "content": "5.1 EXPERIMENTAL SETUP" + }, + { + "type": "text", + "bbox": [ 0.171, 0.785, 0.827, 0.926 ], + "angle": 0, + "content": "We fit a regression to the Relations dataset (Hernandez et al., 2024) using OLMo-7B LRE features and log probabilities. We fit 24 models such that each relation is held out once per random seed across 4 seeds. We train a random forest regression model with 100 decision tree estimators to predict the frequency of terms (either the subject-object frequency, or the object frequency alone; e.g., predicting \"John Lennon\" and \"The Beatles\" or just \"The Beatles\") from one of two sets of features. Our baseline set of features is based on the likelihood of recalling a fact. Given some few-shot context from the relations dataset (\"John Lennon is a lead singer of\"), we extract the log probability of the correct answer, as well as the average accuracy on this prompt across 5 trials. The intuition is that models will be more confident about highly frequent terms. The other set of features includes the first, as well as the faithfulness and causality measurements."
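The following is a minimal sketch of this regression setup with scikit-learn; the exact feature assembly and target transform are assumptions for illustration (the setup above specifies only the 100-estimator random forest and the held-out-relation protocol):

```python
import numpy as np
from sklearn.ensemble import RandomForestRegressor

# Assumed feature layout, one row per example:
# [log_prob_correct, avg_5shot_acc, faithfulness, faith_prob,
#  causality, hard_causality]
def fit_frequency_regressor(features, counts):
    """Random forest with 100 trees predicting log10 term frequency."""
    reg = RandomForestRegressor(n_estimators=100, random_state=0)
    reg.fit(np.asarray(features), np.log10(np.asarray(counts)))
    return reg

# Held-out-relation evaluation: fit on all relations but one, then predict
# counts for the held-out relation (24 fits across 4 seeds in the paper).
# reg = fit_frequency_regressor(X_train, train_counts)
# predicted_counts = 10 ** reg.predict(X_heldout)
```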
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.24, + 0.104, + 0.763, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.273, + 0.825, + 0.345 + ], + "angle": 0, + "content": "Figure 3: Within-Magnitude accuracy (aka the proportion of predictions within one order of magnitude of ground truth) for models predicting object and subject-object co-occurrences in heldout relations. Using LRE features outperforms LM only features by about \\(30\\%\\). We find that it is much easier to predict object frequencies; the subj-object prediction models with LRE features only marginally outperform baseline performance." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.362, + 0.825, + 0.489 + ], + "angle": 0, + "content": "We use Faithfulness and Causality as defined in Hernandez et al. (2024) as well as two other metrics: Faith Prob., which is the log probability of the correct answer as produced by an LRE, and Hard Causality, which is the same as the \"soft\" variant, but only counts the proportion of times the causality edit produces the target answer as the number one prediction. We use every example from the relations for which there are more than one object occurrence or subject-object co-occurrence. We do not provide an explicit signal for which relation an example comes from, but due to the bias of subjects/objects having similar frequencies within a relation, we train multiple models and evaluate on held out relations and average performance. In all settings, the held out set objects and relations are guaranteed to not have been in the training set." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.508, + 0.684, + 0.523 + ], + "angle": 0, + "content": "5.2 LRE METRICS ENCODE FINE-GRAINED FREQUENCY INFORMATION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.536, + 0.825, + 0.689 + ], + "angle": 0, + "content": "Because of the difficulty of predicting the exact number of occurrences, we report accuracy within one order of magnitude of the ground truth. This measures whether the predicted value is within a reasonable range of the actual value. Results are shown in Figure 3. We find that language modeling features do not provide any meaningful signal towards predicting object or subject-object frequencies, and are only marginally above the baseline of predicting the average or random frequencies from the training data. On object frequency predictions, we find that LRE features encode a strong signal allowing for accurate predictions about \\(70\\%\\) of the time. Mean absolute error of the predictions (in natural log space) for LRE features (LM-only features) are 2.1, (4.2) and 1.9, (2.3) on object prediction and subject-object prediction tasks, respectively. We find that subject-object cooccurrence frequency is likely too difficult to predict given the signals that we have here, as our predictions are higher than, but within one standard deviation of the mean baseline." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.708, + 0.825, + 0.835 + ], + "angle": 0, + "content": "Feature Importance: How important are LRE features for predicting the frequency of an item? 
We perform feature permutation tests to see how much each feature (LRE features and log probs) contributes to the final answer. First, we check which of the features used to fit the regression are correlated, since, if they are, perturbing one will leave the signal present in another. In Appendix E, we show that only faithfulness and faith probability are strongly correlated, so for this test only, we train models with a single PCA component representing \\(89\\%\\) of the variance of those two features. We find that hard causality is by far the most important feature for generalization performance, causing a difference of about \\(15\\%\\) accuracy, followed by faithfulness measures with \\(5\\%\\) accuracy, providing evidence that the LRE features are encoding an important signal." + }, + { + "type": "title", + "bbox": [ 0.172, 0.855, 0.446, 0.869 ], + "angle": 0, + "content": "5.3 GENERALIZATION TO A NEW LM" + }, + { + "type": "text", + "bbox": [ 0.171, 0.882, 0.825, 0.927 ], + "angle": 0, + "content": "Next, we test the ability to generalize the regression fit of one LM to another, without requiring further supervision. If such a model could generalize, we could predict term counts for models for which we do not have access to the pretraining data. We keep the objective the same and apply" + }, + { + "type": "page_number", + "bbox": [ 0.494, 0.949, 0.504, 0.96 ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ 0.173, 0.033, 0.48, 0.049 ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ 0.171, 0.113, 0.825, 0.184 ], + "angle": 0, + "content": "Table 1: Within-Magnitude accuracy for different settings of train and test models. Overall, we find that fitting a regression on one model's LREs and evaluating on the other provides a meaningful signal compared to fitting using only log probability and task performance, or predicting the average training data frequency. The metric here is the proportion of predictions within one order of magnitude (\\(10\\times\\)) of the ground truth. Here, Eval. on GPT-J means the regression is fit on OLMo and evaluated on GPT-J." + }, + { + "type": "table", + "bbox": [ 0.177, 0.184, 0.821, 0.265 ], + "angle": 0, + "content": "
Model | Predicting Object Occs. (Eval. on GPT-J) | Predicting Object Occs. (Eval. on OLMo) | Predicting Subject-Object Co-Occs. (Eval. on GPT-J) | Predicting Subject-Object Co-Occs. (Eval. on OLMo)
LRE Features | 0.65±0.12 | 0.49±0.12 | 0.76±0.12 | 0.68±0.08
LogProb Features | 0.42±0.10 | 0.41±0.09 | 0.66±0.09 | 0.60±0.07
Mean Freq. Baseline | 0.31±0.15 | 0.41±0.17 | 0.57±0.15 | 0.67±0.16
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.279, + 0.825, + 0.309 + ], + "angle": 0, + "content": "the regression model, fit for example on OLMo (\"Train OLMo\" setting), to features extracted from GPT-J, using ground truth counts from The Pile (and vice versa, i.e., the \"Train GPT-J\" setting)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.314, + 0.827, + 0.413 + ], + "angle": 0, + "content": "We again train a random forest regression model to predict the frequency of terms (either the subject-object frequency, or the object frequency alone; e.g., predicting \"John Lennon\" and \"The Beatles\" or just \"The Beatles\") on features from one of two models: either OLMo-7B (final checkpoint) or GPT-J, treating the other as the 'closed' model. We test the hypothesis that LRE features (faithfulness, causality) are useful in predicting term frequencies across different models, with the hope that this could be applied to dataset inference methods in the future, where access to the ground truth pretraining data counts is limited or unavailable." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.434, + 0.827, + 0.603 + ], + "angle": 0, + "content": "Results Our results are presented in Table 1. First, we find that there is a signal in the LRE features that does not exist in the log probability features: We are able to fit a much better generalizable model when using LRE features as opposed to the LM probabilities alone. Second, evaluating on the LRE features of a heldout model (scaled by the ratio of total tokens trained between the two models) maintains around the same accuracy when fit on exact counts from OLMo, allowing us to predict occurrences without access to the GPT-J pretraining data. We find that predicting either the subject-object co-occurrences or object frequencies using LREs alone is barely better than the baseline. This task is much more difficult than predicting the frequency of the object alone, but our model may just also be unable to account for outliers in the data, which is tightly clustered around the mean (thus giving the high mean baseline performance of between approx. \\(60 - 70\\%\\)). Nevertheless, we show that linear structure for relations within LM representations encode a rich signal representing dataset frequency." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.624, + 0.342, + 0.638 + ], + "angle": 0, + "content": "5.4 ERROR ANALYSIS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.653, + 0.827, + 0.779 + ], + "angle": 0, + "content": "In Table 2 we show example predictions from our regression model that we fit on OLMo and evaluate on heldout relations with LREs measured on GPT-J. We find that some relations transfer more easily than others, with the star constellation name transferring especially poorly. In general, the regression transfers well, without performance deteriorating much (about \\(5\\%\\) accuracy: see Figure 3 compared to the evaluation of GPT-J in Table 1), suggesting LREs encode information in a consistent way across models. We also find that the regression makes use of the full prediction range, producing values in the millions (see Table 2) as well as in the tens; The same regression shown in the table also predicts 59 occurrences for \"Caroline Bright\" (Will Smith's mother) where the ground truth is 48." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.806, + 0.312, + 0.821 + ], + "angle": 0, + "content": "6 DISCUSSION" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.84, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Connection to Factual Recall Work in interpretability has focused largely around linear representations in recent years, and our work aims to address the open question of the conditions in which they form. We find that coherent linear representations form when the relevant terms (in this case subject-object co-occurrences) appear in pretraining at a consistent enough rate. Analogously, Chang et al. (2024) show that repeated exposure encourages higher retention of facts. Future work could investigate the connection between factual recall accuracy and linear representations." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.113, + 0.825, + 0.196 + ], + "angle": 0, + "content": "Table 2: Examples of a regression fit on OLMo LRE metrics and evaluated on GPT-J on heldout relations, demonstrating common error patterns: 1. Predictions are better for relations that are closer to those found in fitting the relation (country related relations), 2. Some relations, like star-constellation perform very poorly, possibly due to low frequency, 3. The regression model can be sensitive to the choice of subject (e.g., William vs. Harry), telling us the choice of data to measure LREs for is important for predictions." + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.197, + 0.823, + 0.309 + ], + "angle": 0, + "content": "
Predicting Object Frequency in GPT-J, Regression fit on OLMo
Relation | Subject | Object | Prediction | Ground Truth | Error
landmark-in-country | Menangle Park | Australia | 2,986,989 | 3,582,602 | 1.2x
country-language | Brazil | Portuguese | 845,406 | 561,005 | 1.5x
star-constellation name | Arcturus | Boötes | 974,550 | 2,817 | 346x
person-mother | Prince William | Princess Diana | 5,826 | 27,094 | 4.6x
person-mother | Prince Harry | Princess Diana | 131 | 27,094 | 207x
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.324, + 0.827, + 0.576 + ], + "angle": 0, + "content": "Linear Representations in LMs The difficulty of disentangling the formation of linear representations from increases in relation accuracy, especially in the few-shot case, is interesting. Across 24 relations, only the \"star-constellation-name\" and \"product-by-company\" relations have few-shot accuracies that far exceed their causality scores (and both are low frequency). Thus, it is still an open question how LMs are able to recall these tasks. The fact that few-shot accuracy and causality seem so closely linked is consistent with findings that ICL involves locating the right task (Min et al., 2022) and applying a 'function' to map input examples to outputs (Hendel et al., 2023; Todd et al., 2024). The finding that frequency controls this ability is perhaps unsurprising, as frequency also controls this linear structure emerging in static embeddings (Ethayarajh et al., 2019). Jiang et al. (2024) prove a strong frequency-based condition (based on matched log-odds between subjects and objects) and an implicit bias of gradient descent (when the frequency condition is not met) encourage linearity in LLMs; our work empirically shows conditions where linear representations tend to form in more realistic settings. If LMs are 'only' solving factual recall or performing ICL through linear structures, it is surprising how well this works at scale, but the simplicity also provides a promising way to understand LMs and ICL in general. An interesting avenue for future work would be to understand if and when LMs use a method that is not well approximated linearly to solve these types of tasks, as recent work has shown non-linearity can be preferred for some tasks in recurrent networks (Csordás et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.602, + 0.825, + 0.729 + ], + "angle": 0, + "content": "Future Work in Predicting Dataset Frequency The ability to predict the contents of pretraining data is an important area for investigating memorization, contamination, and privacy of information used to train models. In our approach, we show it is possible to extract pretraining data signal without direct supervision. Without interpretability work on the nature of representations in LMs, we would not know of this implicit dataset signal, and we argue that interpretability can generate useful insights more broadly as well. Extensions on this work could include more information to tighten the prediction bounds on frequency, such as extracting additional features from the tokenizer (Hayase et al., 2024). We hope this work encourages future research in other ways properties of pretraining data affect LM representations for both improving and better understanding these models." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.761, + 0.321, + 0.777 + ], + "angle": 0, + "content": "7 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.799, + 0.825, + 0.925 + ], + "angle": 0, + "content": "We find a connection between linear representations of subject-relation-object factual triplets in LMs and the pretraining frequencies of the subjects and objects in those relations. This finding can guide future interpretability work in deciphering whether a linear representation for a given concept will exist in a model, since we observe that frequencies below a certain threshold for a given model will not yield LREs (a particular class of linear representation). 
From there we show that we can use the presence of linear representations to predict with some accuracy the frequency of terms in the pretraining corpus of an open-weights, closed-data model without supervision. Future work could aim to improve on our bounds of predicted frequencies. Overall, our work presents a meaningful step towards understanding the interactions between pretraining data and internal LM representations." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.104, + 0.357, + 0.119 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.135, + 0.825, + 0.164 + ], + "angle": 0, + "content": "This work was performed while JM was an intern at Ai2. We thank the anonymous reviewers and members of the Aristo and AllenNLP teams at Ai2 for valuable feedback." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.187, + 0.287, + 0.202 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.21, + 0.826, + 0.268 + ], + "angle": 0, + "content": "Viraat Aryabumi, Yixuan Su, Raymond Ma, Adrien Morisot, Ivan Zhang, Acyr Locatelli, Marzieh Fadaee, Ahmet Üstün, and Sara Hooker. To code or not to code? exploring impact of code in pretraining. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=zSfeN1uAcx." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.279, + 0.826, + 0.336 + ], + "angle": 0, + "content": "Giuseppe Ateniese, Luigi V Mancini, Angelo Spognardi, Antonio Villani, Domenico Vitali, and Giovanni Felici. Hacking smart machines with smarter ones: How to extract meaningful data from machine learning classifiers. International Journal of Security and Networks, 10(3):137-150, 2015. URL https://dl.acm.org/doi/10.1504/IJSN.2015.071829." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.347, + 0.826, + 0.391 + ], + "angle": 0, + "content": "Sid Black, Lee Sharkey, Leo Grinsztajn, Eric Winsor, Dan Braun, Jacob Merizian, Kip Parker, Carlos Ramón Guevara, Beren Millidge, Gabriel Alfour, and Connor Leahy. Interpreting neural networks through the polytope lens, 2022. URL https://arxiv.org/abs/2211.12312." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.402, + 0.826, + 0.458 + ], + "angle": 0, + "content": "Nicholas Carlini, Steve Chien, Milad Nasr, Shuang Song, Andreas Terzis, and Florian Tramér. Membership inference attacks from first principles. In 2022 IEEE Symposium on Security and Privacy (SP), pp. 1897-1914, 2022. URL https://ieeexplore.ieee.org/document/9833649/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.47, + 0.826, + 0.527 + ], + "angle": 0, + "content": "Nicholas Carlini, Daphne Ippolito, Matthew Jagielski, Katherine Lee, Florian Tramer, and Chiyuan Zhang. Quantifying memorization across neural language models. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=TatRHT_1cK." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.539, + 0.826, + 0.596 + ], + "angle": 0, + "content": "Hoyeon Chang, Jinho Park, Seonghyeon Ye, Sohee Yang, Youngkyung Seo, Du-Seong Chang, and Minjoon Seo. How do large language models acquire factual knowledge during pretraining? 
In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. URL https://openreview.net/forum?id=TYdzj1EvBP." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.607, + 0.826, + 0.692 + ], + "angle": 0, + "content": "David Chanin, Anthony Hunter, and Oana-Maria Camburu. Identifying Linear Relational Concepts in Large Language Models. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 1524-1535. Association for Computational Linguistics, 2024. doi: 10.18653/v1/2024.naacl-long.85. URL https://aclanthology.org/2024.naacl-long.85." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.703, + 0.826, + 0.802 + ], + "angle": 0, + "content": "Róbert Csordás, Christopher Potts, Christopher D Manning, and Atticus Geiger. Recurrent neural networks learn to store and generate sequences using non-linear representations. In Yonatan Belinkov, Najoung Kim, Jaap Jumelet, Hosein Mohebbi, Aaron Mueller, and Hanjie Chen (eds.), Proceedings of the 7th BlackboxNLP Workshop: Analyzing and Interpreting Neural Networks for NLP, pp. 248-262, Miami, Florida, US, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.blackboxnlp-1.17. URL https://aclanthology.org/2024.blackboxnlp-1.17/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.814, + 0.826, + 0.857 + ], + "angle": 0, + "content": "Yanai Elazar, Shauli Ravfogel, Alon Jacovi, and Yoav Goldberg. Amnesic Probing: Behavioral Explanation with Amnesic Counterfactuals. Transactions of the Association for Computational Linguistics, 9:160-175, 03 2021. URL https://doi.org/10.1162/tacl_a_00359." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.868, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Yanai Elazar, Nora Kassner, Shauli Ravfogel, Amir Feder, Abhilasha Ravichander, Marius Mosbach, Yonatan Belinkov, Hinrich Schütze, and Yoav Goldberg. Measuring causal effects of data statistics on language model's 'factual' predictions. arXiv preprint arXiv:2207.14251, 2022. URL https://arxiv.org/abs/2207.14251." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.21, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.175 + ], + "angle": 0, + "content": "Yanai Elazar, Akshita Bhagia, Ian Helgi Magnusson, Abhilasha Ravichander, Dustin Schwenk, Alane Suhr, Evan Pete Walsh, Dirk Groeneveld, Luca Soldaini, Sameer Singh, Hannaneh Hajishirzi, Noah A. Smith, and Jesse Dodge. What's in my big data? In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=RvfPnOkPV4." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.183, + 0.826, + 0.24 + ], + "angle": 0, + "content": "Nelson Elhage, Neel Nanda, Catherine Olsson, Tom Henighan, Nicholas Joseph, Ben Mann, Amanda Askell, Yuntao Bai, Anna Chen, Tom Conerly, et al. A mathematical framework for transformer circuits. Transformer Circuits Thread, 2021. URL https://transformer-circuits.pub/2021/framework/index.html." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.248, + 0.828, + 0.361 + ], + "angle": 0, + "content": "Hady Elsahar, Pavlos Vougiouklis, Arslen Remaci, Christophe Gravier, Jonathon Hare, Frederique Laforest, and Elena Simperl. T-REx: A large scale alignment of natural language with knowledge base triples. In Nicoletta Calzolari, Khalid Choukri, Christopher Cieri, Thierry Declerck, Sara Goggi, Koiti Hasida, Hitoshi Isahara, Bente Maegaard, Joseph Mariani, Hélène Mazo, Asuncion Moreno, Jan Odijk, Stelios Piperidis, and Takenobu Tokunaga (eds.), Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan, May 2018. European Language Resources Association (ELRA). URL https://aclanthology.org/L18-1544." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.369, + 0.826, + 0.44 + ], + "angle": 0, + "content": "Kawin Ethayarajh, David Duvenaud, and Graeme Hirst. Towards Understanding Linear Word Analogies. In Anna Korhonen, David Traum, and Lluís Márquez (eds.), Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 3253-3262. Association for Computational Linguistics, 2019. doi: 10.18653/v1/P19-1315. URL https://aclanthology.org/P19-1315." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.448, + 0.826, + 0.506 + ], + "angle": 0, + "content": "Leo Gao, Stella Biderman, Sid Black, Laurence Golding, Travis Hoppe, Charles Foster, Jason Phang, Horace He, Anish Thite, Noa Nabeshima, et al. The pile: An 800gb dataset of diverse text for language modeling. arXiv preprint arXiv:2101.00027, 2020. URL https://arxiv.org/abs/2101.00027." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.513, + 0.826, + 0.571 + ], + "angle": 0, + "content": "Leo Gao, Tom Dupre la Tour, Henk Tillman, Gabriel Goh, Rajan Troll, Alec Radford, Ilya Sutskever, Jan Leike, and Jeffrey Wu. Scaling and evaluating sparse autoencoders. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=tcsZt9ZNKD." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.578, + 0.826, + 0.637 + ], + "angle": 0, + "content": "Shivam Garg, Dimitris Tsipras, Percy Liang, and Gregory Valiant. What can transformers learn in-context? a case study of simple function classes. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=f1NZJ2eOet." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.644, + 0.826, + 0.716 + ], + "angle": 0, + "content": "Anna Gladkova, Aleksandr Drozd, and Satoshi Matsuoka. Analogy-based detection of morphological and semantic relations with word embeddings: what works and what doesn't. In Jacob Andreas, Eunsol Choi, and Angeliki Lazaridou (eds.), Proceedings of the NAACL Student Research Workshop, pp. 8-15, San Diego, California, June 2016. Association for Computational Linguistics. doi: 10.18653/v1/N16-2002. URL https://aclanthology.org/N16-2002/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.723, + 0.828, + 0.822 + ], + "angle": 0, + "content": "Dirk Groeneveld, Iz Beltagy, Evan Walsh, Akshita Bhagia, Rodney Kinney, Oyvind Tafjord, Ananya Jha, Hamish Ivison, Ian Magnusson, Yizhong Wang, et al. OLMo: Accelerating the science of language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 
15789-15809, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.841. URL https://aclanthology.org/2024.acl-long.841/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.83, + 0.826, + 0.887 + ], + "angle": 0, + "content": "Jonathan Hayase, Alisa Liu, Yejin Choi, Sewoong Oh, and Noah A. Smith. Data mixture inference: What do BPE tokenizers reveal about their training data? In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. URL https://openreview.net/forum?id=EHXyeImux0." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Roee Hendel, Mor Geva, and Amir Globerson. In-Context Learning Creates Task Vectors. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational" + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.511, + 0.961 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.103, + 0.826, + 0.147 + ], + "angle": 0, + "content": "Linguistics: EMNLP 2023, pp. 9318-9333. Association for Computational Linguistics, 2023. doi: 10.18653/v1/2023-findings-emnlp.624. URL https://aclanthology.org/2023-findings-emnlp.624." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.155, + 0.825, + 0.212 + ], + "angle": 0, + "content": "Evan Hernandez, Arnab Sen Sharma, Tal Haklay, Kevin Meng, Martin Wattenberg, Jacob Andreas, Yonatan Belinkov, and David Bau. Linearity of relation decoding in transformer language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=w7LU2s14kE." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.221, + 0.826, + 0.276 + ], + "angle": 0, + "content": "Robert Huben, Hoagy Cunningham, Logan Riggs Smith, Aidan Ewart, and Lee Sharkey. Sparse autoencoders find highly interpretable features in language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=F76bwRSLeK." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.286, + 0.826, + 0.342 + ], + "angle": 0, + "content": "Yibo Jiang, Goutham Rajendran, Pradeep Kumar Ravikumar, Bryon Aragam, and Victor Veitch. On the origins of linear representations in large language models. In *Forty-first International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=otuTw4Mghk." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.352, + 0.826, + 0.436 + ], + "angle": 0, + "content": "Marzena Karpinska, Bofang Li, Anna Rogers, and Aleksandr Drozd. Subcharacter information in Japanese embeddings: When is it worth it? In Georgiana Dinu, Miguel Ballesteros, Avirup Sil, Sam Bowman, Wael Hamza, Anders Sogaard, Tahira Naseem, and Yoav Goldberg (eds.), Proceedings of the Workshop on the Relevance of Linguistic Structure in Neural Architectures for NLP, pp. 28-37, Melbourne, Australia, July 2018. Association for Computational Linguistics. doi: 10.18653/v1/W18-2905. URL https://aclanthology.org/W18-2905/." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.444, + 0.826, + 0.515 + ], + "angle": 0, + "content": "Maximilian Köper, Christian Scheible, and Sabine Schulte im Walde. Multilingual reliability and \"semantic\" structure of continuous word spaces. In Matthew Purver, Mehrnoosh Sadrzadeh, and Matthew Stone (eds.), Proceedings of the 11th International Conference on Computational Semantics, pp. 40-45, London, UK, April 2015. Association for Computational Linguistics. URL https://aclanthology.org/W15-0105/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.524, + 0.826, + 0.636 + ], + "angle": 0, + "content": "Shayne Longpre, Gregory Yauney, Emily Reif, Katherine Lee, Adam Roberts, Barret Zoph, Denny Zhou, Jason Wei, Kevin Robinson, David Mimno, and Daphne Ippolito. A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 3245-3276, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.179. URL https://aclanthology.org/2024.naacl-long.179/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.644, + 0.826, + 0.701 + ], + "angle": 0, + "content": "Yingwei Ma, Yue Liu, Yue Yu, Yuanliang Zhang, Yu Jiang, Changjian Wang, and Shanshan Li. At which training stage does code data help LLMs reasoning? In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=KIPJKST4gw." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.71, + 0.826, + 0.807 + ], + "angle": 0, + "content": "Alex Mallen, Akari Asai, Victor Zhong, Rajarshi Das, Daniel Khashabi, and Hannaneh Hajishirzi. When not to trust language models: Investigating effectiveness of parametric and non-parametric memories. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 9802–9822, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.546. URL https://aclanthology.org/2023.acl-long.546." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.817, + 0.826, + 0.874 + ], + "angle": 0, + "content": "R Thomas McCoy, Shunyu Yao, Dan Friedman, Mathew D Hardy, and Thomas L Griffiths. Embers of autoregression show how large language models are shaped by the problem they are trained to solve. Proceedings of the National Academy of Sciences, 121(41):e2322420121, 2024. URL https://www.pnas.org/doi/abs/10.1073/pnas.2322420121." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.882, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Jack Merullo, Carsten Eickhoff, and Ellie Pavlick. Language models implement simple Word2Vec-style vector arithmetic. 
In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational" + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.103, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.104, + 0.826, + 0.148 + ], + "angle": 0, + "content": "Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 5030-5047, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.nacl-long.281. URL https://aclanthology.org/2024.nacl-long.281." + }, + { + "type": "ref_text", + "bbox": [ + 0.176, + 0.155, + 0.826, + 0.198 + ], + "angle": 0, + "content": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781, 2013a. URL https://arxiv.org/abs/1301.3781." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.207, + 0.826, + 0.293 + ], + "angle": 0, + "content": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Corrado, and Jeff Dean. Distributed representations of words and phrases and their compositionality. In C.J. Burges, L. Bottou, M. Welling, Z. Ghahramani, and K.Q. Weinberger (eds.), Advances in Neural Information Processing Systems, volume 26. Curran Associates, Inc., 2013b. URL https://proceedings.neurips.cc/paper_files/paper/2013/file/9aa42b31882ec039965f3c4923ce901b-Paper.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.301, + 0.826, + 0.387 + ], + "angle": 0, + "content": "Sewon Min, Xinxi Lyu, Ari Holtzman, Mikel Artetxe, Mike Lewis, Hannaneh Hajishirzi, and Luke Zettlemoyer. Rethinking the role of demonstrations: What makes in-context learning work? In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 11048-11064, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.759. URL https://aclanthology.org/2022.emnlp-main.759/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.394, + 0.826, + 0.438 + ], + "angle": 0, + "content": "Chris Olah, Nick Cammarata, Ludwig Schubert, Gabriel Goh, Michael Petrov, and Shan Carter. Zoom in: An introduction to circuits. Distill, 5(3):e00024-001, 2020. URL https://distill.pub/2020/circuits/zoom-in/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.446, + 0.826, + 0.503 + ], + "angle": 0, + "content": "Yonatan Oren, Nicole Meister, Niladri S. Chatterji, Faisal Ladhak, and Tatsunori Hashimoto. Proving test set contamination in black-box language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=KS8mIvetg2." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.512, + 0.826, + 0.569 + ], + "angle": 0, + "content": "Alberto Paccanaro and Geoffrey E Hinton. Learning Hierarchical Structures with Linear Relational Embedding. In Advances in Neural Information Processing Systems, volume 14. MIT Press, 2001. URL https://papers.nips.cc/paper_files/paper/2001/bit/814a9c18f5abff398787c9cfcbf3d80c-Abstract.html." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.577, + 0.826, + 0.622 + ], + "angle": 0, + "content": "Kiho Park, Yo Joong Choe, and Victor Veitch. The Linear Representation Hypothesis and the Geometry of Large Language Models. In *Forty-First International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=UGpGkLzwpP." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.629, + 0.826, + 0.701 + ], + "angle": 0, + "content": "Jeffrey Pennington, Richard Socher, and Christopher Manning. GloVe: Global vectors for word representation. In Alessandro Moschitti, Bo Pang, and Walter Daelemans (eds.), Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 1532-1543, Doha, Qatar, October 2014. Association for Computational Linguistics. doi: 10.3115/v1/D14-1162. URL https://aclanthology.org/D14-1162." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.709, + 0.826, + 0.78 + ], + "angle": 0, + "content": "Shauli Ravfogel, Yanai Elazar, Hila Gonen, Michael Twiton, and Yoav Goldberg. Null it out: Guarding protected attributes by iterative nullspace projection. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 7237-7256, Online, July 2020. Association for Computational Linguistics. URL https://www.aclweb.org/anthology/2020.acl-main.647." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.788, + 0.826, + 0.874 + ], + "angle": 0, + "content": "Yasaman Razeghi, Robert L Logan IV, Matt Gardner, and Sameer Singh. Impact of pretraining term frequencies on few-shot numerical reasoning. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Findings of the Association for Computational Linguistics: EMNLP 2022, pp. 840-854, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022-findings-emnlp.59. URL https://aclanthology.org/2022-findings-emnlp.59/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.882, + 0.826, + 0.925 + ], + "angle": 0, + "content": "Yasaman Razeghi, Hamish Ivison, Sameer Singh, and Yanai Elazar. Backtracking mathematical reasoning of language models to the pretraining data. In NeurIPS Workshop on Attributing Model Behavior at Scale, 2023. URL https://openreview.net/forum?id=EKvqw9k3lC." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.104, + 0.826, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.148 + ], + "angle": 0, + "content": "Nina Rimsky, Nick Gabrieli, Julian Schulz, Meg Tong, Evan Hubinger, and Alexander Turner. Steering llama 2 via contrastive activation addition. pp. 15504-15522, August 2024. doi: 10.18653/v1/2024.acl-long.828. URL https://aclanthology.org/2024.acl-long.828/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.157, + 0.825, + 0.2 + ], + "angle": 0, + "content": "G. Salton, A. Wong, and C. S. Yang. A vector space model for automatic indexing. Commun. ACM, 18(11):613-620, November 1975. ISSN 0001-0782. doi: 10.1145/361219.361220. URL https://doi.org/10.1145/361219.361220." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.21, + 0.825, + 0.281 + ], + "angle": 0, + "content": "Naomi Saphra and Sarah Wiegrefe. 
Mechanistic? In Yonatan Belinkov, Najoung Kim, Jaap Jumelet, Hosein Mohebbi, Aaron Mueller, and Hanjie Chen (eds.), Proceedings of the 7th BlackboxNLP Workshop: Analyzing and Interpreting Neural Networks for NLP, pp. 480-498, Miami, Florida, US, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.blackboxnlp-1.30. URL https://aclanthology.org/2024.blackboxnlp-1.30/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.29, + 0.825, + 0.375 + ], + "angle": 0, + "content": "Preethi Seshadri, Sameer Singh, and Yanai Elazar. The bias amplification paradox in text-to-image generation. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 6367-6384, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.353. URL https://aclanthology.org/2024.naacl-long.353/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.385, + 0.825, + 0.442 + ], + "angle": 0, + "content": "Weijia Shi, Anirudh Ajith, Mengzhou Xia, Yangsibo Huang, Daogao Liu, Terra Blevins, Danqi Chen, and Luke Zettlemoyer. Detecting pretraining data from large language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=zWqr3MQuNs." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.451, + 0.825, + 0.508 + ], + "angle": 0, + "content": "Reza Shokri, Marco Stronati, Congzheng Song, and Vitaly Shmatikov. Membership inference attacks against machine learning models. In 2017 IEEE Symposium on Security and Privacy (SP), pp. 3-18, 2017. doi: 10.1109/SP.2017.41. URL https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7958568." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.518, + 0.825, + 0.603 + ], + "angle": 0, + "content": "Aviv Slobodkin, Omer Goldman, Avi Caciularu, Ido Dagan, and Shauli Ravfogel. The curious case of hallucinatory (un)answerability: Finding truths in the hidden states of over-confident large language models. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 3607-3625, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.220. URL https://aclanthology.org/2023.emnlp-main.220/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.613, + 0.825, + 0.711 + ], + "angle": 0, + "content": "Luca Soldaini, Rodney Kinney, Akshita Bhagia, Dustin Schwenk, David Atkinson, Russell Authur, Ben Bogin, Khyathi Chandu, Jennifer Dumas, Yanai Elazar, et al. Dolma: an open corpus of three trillion tokens for language model pretraining research. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 15725-15788, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.840. URL https://aclanthology.org/2024.acl-long.840/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.721, + 0.825, + 0.791 + ], + "angle": 0, + "content": "Nishant Subramani, Nivedita Suresh, and Matthew Peters. Extracting Latent Steering Vectors from Pretrained Language Models. In Smaranda Muresan, Preslav Nakov, and Aline Villavicencio (eds.), Findings of the Association for Computational Linguistics: ACL 2022, pp. 
566-581. Association for Computational Linguistics, 2022. doi: 10.18653/v1/2022-findings-acl.48. URL https://aclanthology.org/2022-findings-acl.48." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.801, + 0.825, + 0.843 + ], + "angle": 0, + "content": "Anshuman Suri and David Evans. Formalizing and estimating distribution inference risks. Proceedings on Privacy Enhancing Technologies, 2022. URL https://arxiv.org/abs/2109.06024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.855, + 0.825, + 0.924 + ], + "angle": 0, + "content": "Adly Templeton, Tom Conerly, Jonathan Marcus, Jack Lindsey, Trenton Bricken, Brian Chen, Adam Pearce, Craig Citro, Emmanuel Ameisen, Andy Jones, et al. Scaling Monoseismicity: Extracting Interpretable Features from Claude 3 Sonnet. 2024. URL https://transformer-circuits.pub/2024/scaling-monoseismicity/index.html." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.104, + 0.825, + 0.148 + ], + "angle": 0, + "content": "Eric Todd, Millicent Li, Arnab Sen Sharma, Aaron Mueller, Byron C Wallace, and David Bau. Function vectors in large language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=AwyxtyMwaG." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.825, + 0.185 + ], + "angle": 0, + "content": "Ben Wang and Aran Komatsuzaki. GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model. https://github.com/kingoflolz/mesh-transformer-jax, May 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.192, + 0.825, + 0.248 + ], + "angle": 0, + "content": "Xinyi Wang, Alfonso Amayuelas, Kexun Zhang, Liangming Pan, Wenhu Chen, and William Yang Wang. Understanding reasoning ability of language models from the perspective of reasoning paths aggregation. In *Forty-first International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=dZsEOFUDew." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.257, + 0.825, + 0.314 + ], + "angle": 0, + "content": "Xinyi Wang, Antonis Antoniades, Yanai Elazar, Alfonso Amayuelas, Alon Albalak, Kexun Zhang, and William Yang Wang. Generalization v.s. memorization: Tracing language models' capabilities back to pretraining data. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=IQxBDLmVpT." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.322, + 0.825, + 0.38 + ], + "angle": 0, + "content": "Sang Michael Xie, Hieu Pham, Xuanyi Dong, Nan Du, Hanxiao Liu, Yifeng Lu, Percy Liang, Quoc V Le, Tengyu Ma, and Adams Wei Yu. Doremi: Optimizing data mixtures speeds up language model pretraining. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=1XuByUeHhd." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.104, + 0.825, + 0.38 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.405, + 0.322, + 0.421 + ], + "angle": 0, + "content": "A LIMITATIONS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.436, + 0.827, + 0.631 + ], + "angle": 0, + "content": "While our approach thoroughly tracks exposure to individual terms and formation of LRE features across pretraining, we can not draw causal6 claims about how exposure affects individual representations, due to the cost of counterfactual pretraining. We try to address this by showing the frequency of individual terms can be predicted with some accuracy from measurements of LRE presence. We motivate this approach as a possible way to detect the training data of closed-data LMs; however, we are not able to make any guarantees on its efficacy in settings not shown here, and would caution drawing strong conclusions without additional information. Furthermore, we find that our method is relatively worse at predicting subject-object co-occurrences than object occurrences, and our method fails to account for the harder task. Future work could expand on this tool by incorporating it with other data inference methods for greater confidence. We also do not discuss the role of the presentation of facts on the formation of LRE features, but following Elsahar et al. (2018) and the strength of the relationship we find, we speculate this has minimal impact. Note that the BatchSearch tool we release tracks the exact position index of the searched terms, thus facilitating future work on questions about templates and presentation of information." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.651, + 0.624, + 0.667 + ], + "angle": 0, + "content": "B EFFECT OF TRAINING ON INCORRECT EXAMPLES" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.682, + 0.827, + 0.822 + ], + "angle": 0, + "content": "In Hernandez et al. (2024), examples are filtered to ones in which the LM gets correct, assuming that an LRE will only exist once a model has attained the knowledge to answer the relation accuracy (e.g., knowing many country capitals). We find that the choice of examples for fitting LREs is not entirely dependent on the model 'knowing' that relation perfectly (i.e., attains high accuracy). This is convenient for our study, where we test early checkpoint models, that do not necessarily have all of the information that they will have seen later in training. In Figure 5, we show faithfulness on relations where the LRE was fit with all, half, or zero correct examples. We omit data for which the model did not get enough incorrect examples. Averages across relations for which we have enough data are shown in Figure 4, which shows that there is not a considerable difference in the choice of LRE samples to train with." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.841, + 0.49, + 0.858 + ], + "angle": 0, + "content": "C LRE HYPERPARAMETER TUNING" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.872, + 0.825, + 0.902 + ], + "angle": 0, + "content": "There are three hyperparameters for fitting LREs: layer at which to edit the subject, the beta term used to scale the LRE weight matrix, and the rank of the pseudoinverse matrix used to make edits for" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.747, + 0.925 + ], + "angle": 0, + "content": "6 And thus mechanistic, in the narrow technical sense of the term (Saphra & Wegreffe, 2024)." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.511, + 0.961 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.134, + 0.498, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.134, + 0.822, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.291, + 0.825, + 0.321 + ], + "angle": 0, + "content": "Figure 4: Average Causality and Faithfulness results across relations depending on if the LRE was fit with correct or incorrect samples. We find no notable difference in the choice of examples." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.395, + 0.825, + 0.818 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.832, + 0.825, + 0.888 + ], + "angle": 0, + "content": "Figure 5: Causality and Faithfulness results for each relation depending on if the LRE was fit with correct or incorrect samples. Note that relations with only one bar do not have zeros in the other categories. It means that there was not enough data that the model (OLMo-7B) got wrong to have enough examples to fit." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image_caption", + "bbox": [ + 0.424, + 0.116, + 0.576, + 0.127 + ], + "angle": 0, + "content": "Best Layer Beta vs. Faithfulness" + }, + { + "type": "image", + "bbox": [ + 0.203, + 0.135, + 0.818, + 0.5 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.535, + 0.825, + 0.579 + ], + "angle": 0, + "content": "Figure 6: OLMo 0424 7B per layer faithfulness scores as a function of the choice of layer at which to fit the LRE. Note we do not use these results to choose the layer for the LRE, instead preferring the results from the causality sweep." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.605, + 0.825, + 0.744 + ], + "angle": 0, + "content": "measuring causality. Beta is exclusive to measuring faithfulness and rank is exclusive to causality. We test the same ranges for each as in Hernandez et al. (2024): [0, 5] beta and [0, full_rank] for causality at varying intervals. Those intervals are every 2 from [0,100], every 5 from [100,200], every 25 from [200, 500], every 50 from [500, 1000], every 250 from [1000, hidden_size]. We perform the hyperparameter sweeps across faithfulness and causality, but we choose the layer to edit based on the causality score. In cases where this is not the same layer as what faithfulness would decide, we use the layer causality chooses, as it would not make sense to train one LRE for each metric. We refer the reader to Hernandez et al. (2024) for more details on the interactions between hyperparameters and the choice of layer. The results of our sweeps on OLMo-7B across layers in Figures 6 and 7 and across beta and rank choices in Figures 8 and 9." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.766, + 0.628, + 0.781 + ], + "angle": 0, + "content": "D BATCH SEARCH COUNTS COMPARED TO WIMBD" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.799, + 0.825, + 0.843 + ], + "angle": 0, + "content": "In Figure 10, we find that What's in My Big Data (Elazar et al., 2024) matches very well to batch search co-occurrences; however, WIMBD tends to over-predict co-occurrences (slope less than 1), due to the sequence length being shorter than many documents, as discussed in the main paper." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.864, + 0.589, + 0.879 + ], + "angle": 0, + "content": "E FEATURE CORRELATIONS AND IMPORTANCES" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Our feature importance test is shown in Figure 12. This permutation test was done on the heldout data to show which features contribute the most to generalization performance. We use PCA to" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image_caption", + "bbox": [ + 0.454, + 0.296, + 0.546, + 0.307 + ], + "angle": 0, + "content": "Layer vs. Causality" + }, + { + "type": "image", + "bbox": [ + 0.205, + 0.315, + 0.819, + 0.678 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.715, + 0.825, + 0.743 + ], + "angle": 0, + "content": "Figure 7: OLMo 0424 7B per layer causality scores as a function of the choice of layer at which to fit the LRE." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.416, + 0.295, + 0.571, + 0.307 + ], + "angle": 0, + "content": "Best Layer Beta vs. Faithfulness" + }, + { + "type": "image", + "bbox": [ + 0.191, + 0.314, + 0.819, + 0.684 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.209, + 0.724, + 0.789, + 0.74 + ], + "angle": 0, + "content": "Figure 8: OLMo 0424 7B LRE Beta hyperparameter sweep at highest performing layer." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image_caption", + "bbox": [ + 0.429, + 0.302, + 0.57, + 0.313 + ], + "angle": 0, + "content": "Best Layer Rank vs. Causality" + }, + { + "type": "image", + "bbox": [ + 0.207, + 0.321, + 0.816, + 0.684 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.207, + 0.721, + 0.79, + 0.737 + ], + "angle": 0, + "content": "Figure 9: OLMo 0424 7B LRE Rank hyperparameter sweep at highest performing layer." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.507, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.209, + 0.129, + 0.79, + 0.151 + ], + "angle": 0, + "content": "WIMBD vs Batch Cooccurrence. slope=0.94, r=0.99" + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.186, + 0.717, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.377, + 0.417, + 0.622, + 0.436 + ], + "angle": 0, + "content": "WIMBD Cooccurrence" + }, + { + "type": "image_caption", + "bbox": [ + 0.201, + 0.475, + 0.797, + 0.491 + ], + "angle": 0, + "content": "Figure 10: Comparison between WIMBD and Batch Search subject-object co-occurrences" + }, + { + "type": "image", + "bbox": [ + 0.173, + 0.53, + 0.492, + 0.771 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.506, + 0.53, + 0.825, + 0.768 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.784, + 0.825, + 0.841 + ], + "angle": 0, + "content": "Figure 11: Correlations between each feature in our regression analysis. Because of the high correlation between faithfulness metrics, we use a single dimensional PCA to attain one feature that captures \\(89\\%\\) of the variance of both for the purposes of doing feature importance tests. Note that we zero out the diagonal (which has values of 1) for readability." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "reduce the faithfulness features to one feature for the purposes of this test. Correlations are shown in Figure 11" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "title", + "bbox": [ + 0.325, + 0.099, + 0.662, + 0.123 + ], + "angle": 0, + "content": "Permutation Importances" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.15, + 0.825, + 0.368 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.382, + 0.825, + 0.412 + ], + "angle": 0, + "content": "Figure 12: Hard causality is by far the most important feature for generalizing to new relations when predicting Object frequencies, causing a change in about \\(15\\%\\) accuracy." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.435, + 0.669, + 0.451 + ], + "angle": 0, + "content": "F RELATIONSHIP BETWEEN CAUSALITY AND ACCURACY" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.467, + 0.825, + 0.51 + ], + "angle": 0, + "content": "In this section, we provide more detail on the relationship between the formation of linear representations and accuracy on in-context learning tasks. Although the two are very highly correlated, we argue that accuracy and LRE formation are somewhat independent." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.515, + 0.763, + 0.532 + ], + "angle": 0, + "content": "We show this relationship across training for OLMo-1B in Figure 13 and 7B in Figure 14." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.55, + 0.58, + 0.567 + ], + "angle": 0, + "content": "G EXTENDING TO COMMONSENSE RELATIONS" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.582, + 0.827, + 0.736 + ], + "angle": 0, + "content": "Following Elsahar et al. (2018), we focus on factual relations because subject-object co-occurrences are shown to be a good proxy for mentions of the fact. For completeness, we consider 8 additional commonsense relations here. Results for OLMo-7B are shown in Figure 15. We show that frequency is correlated with causality score (.42) in these cases as well, but it is possible subject-object frequencies do not accurately track occurrences of the relation being mentioned. For example, in the \"task person type\" relation, the co-occurrence count of the subject \"researching history\" and the object \"historian\" does not convincingly describe all instances where the historian concept is defined during pretraining. Co-occurrences are perhaps more convincingly related to how a model learns that the outside of a coconut is brown, however (the fruit outside color relation). Therefore, we caution treating these under the same lens as the factual relations. Nevertheless, we believe these results are an interesting perspective on how a different relation family compares to factual relations." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image_caption", + "bbox": [ + 0.394, + 0.289, + 0.604, + 0.302 + ], + "angle": 0, + "content": "Zero Shot, 5 Shot, Causality: OLMo 1B" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.305, + 0.825, + 0.697 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.715, + 0.825, + 0.743 + ], + "angle": 0, + "content": "Figure 13: Zero shot, 5-shot accuracies against causality for each relation across training time in OLMo-1B" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.48, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at ICLR 2025" + }, + { + "type": "image_caption", + "bbox": [ + 0.394, + 0.144, + 0.604, + 0.156 + ], + "angle": 0, + "content": "Zero Shot, 5 Shot, Causality: OLMo 7B" + }, + { + "type": "image", + "bbox": [ + 0.172, + 0.161, + 0.825, + 0.552 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.57, + 0.825, + 0.598 + ], + "angle": 0, + "content": "Figure 14: Zero shot, 5-shot accuracies against causality for each relation across training time in OLMo-7B" + }, + { + "type": "image_caption", + "bbox": [ + 0.18, + 0.675, + 0.667, + 0.691 + ], + "angle": 0, + "content": "OLMo-7B 0424 Development of Commonsense LREs over Training Time" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.706, + 0.825, + 0.859 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.236, + 0.872, + 0.759, + 0.888 + ], + "angle": 0, + "content": "Figure 15:Commonsense relations compared to pretraining time in OLMo-7B." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "25" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12459/9956eeb3-00c8-414d-bf51-c94c41a6c788_origin.pdf b/data/2025/2504_12xxx/2504.12459/9956eeb3-00c8-414d-bf51-c94c41a6c788_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b29ceb208e9f3ef5c30eda472712e64f4ee45c18 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/9956eeb3-00c8-414d-bf51-c94c41a6c788_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b40eeabd2b48d28975bc790088819e1a2b7c5a00f27c006fcac19c01824ca22d +size 1475109 diff --git a/data/2025/2504_12xxx/2504.12459/full.md b/data/2025/2504_12xxx/2504.12459/full.md new file mode 100644 index 0000000000000000000000000000000000000000..ab655f15e173562674453c6d9c215d0b2abcba3b --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/full.md @@ -0,0 +1,325 @@ +# ON LINEAR REPRESENTATIONS AND PRETRAINING DATA FREQUENCY IN LANGUAGE MODELS + +Jack Merullo $^{\diamond}$ Noah A. Smith $^{\text{♣}}$ Sarah Wiegrefe $^{\text{♥♣}}$ Yanai Elazar $^{\text{♥♣}}$ + +$\diamond$ Brown University, $\diamond$ Allen Institute for AI (Ai2), $\clubsuit$ University of Washington + +*Co-senior authors. + +jack_merullo@brown.edu, {noah, sarahw, yanaie}@allenai.org + +# ABSTRACT + +Pretraining data has a direct impact on the behaviors and quality of language models (LMs), but we only understand the most basic principles of this relationship. While most work focuses on pretraining data's effect on downstream task behavior, we investigate its relationship to LM representations. Previous work has discovered that, in language models, some concepts are encoded 'linearly' in the representations, but what factors cause these representations to form (or not)? We study the connection between pretraining data frequency and models' linear representations of factual relations (e.g., mapping France to Paris in a capital prediction task). We find evidence that the formation of linear representations is strongly connected to pretraining term frequencies; specifically for subject-relation-object fact triplets, both subject-object co-occurrence frequency and in-context learning accuracy for the relation are highly correlated with linear representations. This is the case across all phases of pretraining, i.e., it is not affected by the model's underlying capability. In OLMo-7B and GPT-J (6B), we discover that a linear representation consistently (but not exclusively) forms when the subjects and objects within a relation co-occur at least 1k and 2k times, respectively, regardless of when these occurrences happen during pretraining (and around 4k times for OLMo-1B). Finally, we train a regression model on measurements of linear representation quality in fully-trained LMs that can predict how often a term was seen in pretraining. Our model achieves low error even on inputs from a different model with a different pretraining dataset, providing a new method for estimating properties of the otherwise-unknown training data of closed-data models. We conclude that the strength of linear representations in LMs contains signal about the models' pretraining corpora that may provide new avenues for controlling and improving model behavior: particularly, manipulating the models' training data to meet specific frequency thresholds. We release our code to support future work. 
$^{1}$

# 1 INTRODUCTION

Understanding how the content of pretraining data affects language model (LM) behaviors and performance is an active area of research (Ma et al., 2024; Xie et al., 2023; Aryabumi et al., 2025; Longpre et al., 2024; Wang et al., 2025; Seshadri et al., 2024; Razeghi et al., 2023; Wang et al., 2024). For instance, it has been shown that for specific tasks, models perform better on instances containing higher frequency terms than lower frequency ones (Razeghi et al., 2022; Mallen et al., 2023; McCoy et al., 2024). However, the ways in which frequency affects the internal representations of LMs to cause this difference in performance remain unclear. We connect dataset statistics to recent work in interpretability, which focuses on the emergence of simple linear representations of factual relations in LMs (Hernandez et al., 2024; Chanin et al., 2024). Our findings demonstrate a strong correlation between these linear representations and the frequency of terms in the pretraining corpus.

Linear representations in LMs have become central to interpretability research in recent years (Ravfogel et al., 2020; Elazar et al., 2021; Elhage et al., 2021; Slobodkin et al., 2023; Olah et al., 2020; Park et al., 2024; Jiang et al., 2024; Black et al., 2022; Chanin et al., 2024). Linear representations are linear approximations (linear transforms, directions in space) that are simple to understand and that closely approximate the complex non-linear transformations networks implement. These representations are crucial because they allow us to localize much of the behavior and capabilities of LMs to specific directions in activation space. This allows for simple interventions to control model behaviors, i.e., steering (Todd et al., 2024; Subramani et al., 2022; Hendel et al., 2023; Rimsky et al., 2024).

Recent work by Hernandez et al. (2024) and Chanin et al. (2024) highlights how the linearity of different types of relations varies greatly depending on the specific relationships being depicted. For example, over $80\%$ of entities in the "country-largest-city" relation, but less than $30\%$ of entities in the "star-in-constellation" relation, can be approximated this way (Hernandez et al., 2024). Such findings complicate the understanding of the Linear Representation Hypothesis, which proposes that LMs will represent features linearly (Park et al., 2024), without specifying when or why these representations form. While Jiang et al. (2024) provide both theoretical and empirical evidence that the training objectives of LMs implicitly encourage linear representations, it remains unclear why some features are represented this way while others are not. This open question is a central focus of our investigation.

Whether linear representations for "common" concepts are more prevalent in models, or simply easier to identify (using current methods) than those for less common concepts, remains unclear. We hypothesize that factual relations exhibiting linear representations are correlated with higher mention frequencies in the pretraining data (as has been shown with static embeddings, see Ethayarajh et al., 2019), which we confirm in Section 4. Our results also indicate that this can occur at any point in pretraining, as long as a certain average frequency is reached across subject-object pairs in a relation.
In order to count the appearance of terms in data corpora throughout training, we develop an efficient tool for counting tokens in tokenized batches of text, which we release to support future work in this area. We also explore whether the presence of linear representations can provide insights into relation term frequency. In Section 5, we fit a regression model to predict the frequency of individual terms (such as "The Beatles") in the pretraining data, based on metrics measuring the presence of a linear representation for some relation. For example, how well a linear transformation approximates the internal computation of the "lead-singer-of" relation mapping "John Lennon" to "The Beatles" can tell us about the frequency of those terms in the pretraining corpus.

Our findings indicate that the predictive signal, although approximate, is much stronger than that encoded in log probabilities and task accuracies alone, allowing us to estimate the frequencies of held-out relations and terms within approximate ranges. Importantly, this regression model generalizes beyond the specific LM it was trained on without additional supervision. This provides a valuable foundation for analyzing the pretraining corpora of closed-data models with open weights.

To summarize, in this paper we show that:

1. The development of linear representations for factual recall relations in LMs is related to frequency as well as model size.
2. Linear representations form at predictable frequency thresholds during training, regardless of when this frequency threshold is met for the nouns in the relation. The formation of these representations also correlates strongly with recall accuracy.
3. Measuring the extent to which a relation is represented linearly in a model allows us to predict the approximate frequencies of individual terms in the pretraining corpus of that model, even when we do not have access to the model's training data.
4. We release a tool for accurately and efficiently searching through tokenized text to support future research on training data.

![](images/bba2f19318fa711c44305749d8472200aa8f1ab50e866a7ac5021954af5081a2.jpg)
Figure 1: Overview of this work. Given a dataset of subject-relation-object factual relation triplets, we count subject-object co-occurrences throughout pretraining batches. We then measure how well the corresponding relations are represented within an LM across pretraining steps, using the Linear Relational Embeddings (LRE) method from Hernandez et al. (2024). We establish a strong relationship between average co-occurrence frequency and a model's tendency to form linear representations for relations. From this, we show that we can predict frequencies in the pretraining corpus.

# 2 BACKGROUND

# 2.1 LINEAR REPRESENTATIONS

Vector space models have a long history in language processing, where geometric properties of these spaces were used to encode semantic information (Salton et al., 1975; Paccanaro & Hinton, 2001). When and why linear structure emerges without explicit bias has been of considerable interest since the era of static word embeddings. Work on skipgram models (Mikolov et al., 2013a) found that vector space models of language learn regularities which allow performing vector arithmetic between word embeddings to calculate semantic relationships (e.g., Paris - France + Spain = Madrid) (Mikolov et al., 2013b; Pennington et al., 2014).
This property was subject to much debate, as it was not clear why word analogies would appear for some relations and not others (Köper et al., 2015; Karpinska et al., 2018; Gladkova et al., 2016). Follow-up work showed that linguistic regularities form in static embeddings for relations under specific dataset frequency constraints on the relevant terms (Ethayarajh et al., 2019), but these findings do not clearly carry over to how modern LMs learn. More recently, there has been renewed interest in the presence of similar linear structure in models with contextual embeddings like transformer language models (Park et al., 2024; Jiang et al., 2024; Merullo et al., 2024). As a result, there are many ways to find and test for linear representations in modern LMs, though the relationship to pretraining data was not addressed (Huben et al., 2024; Gao et al., 2025; Templeton et al., 2024; Rimsky et al., 2024; Todd et al., 2024; Hendel et al., 2023; Hernandez et al., 2024; Chanin et al., 2024). Many of these share similarities in how they compute and test for linear representations. We focus on a particular class of linear representations called Linear Relational Embeddings (LREs) (Paccanaro & Hinton, 2001).
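To make the regularity at stake concrete, the following toy sketch performs the parallelogram arithmetic described above; the 3-d vectors are invented for illustration and are not real skipgram or GloVe embeddings.

```python
# Toy word-vector analogy arithmetic (hypothetical 3-d embeddings).
import numpy as np

emb = {
    "Paris":  np.array([0.9, 0.1, 0.0]),
    "France": np.array([0.1, 0.8, 0.0]),
    "Madrid": np.array([0.9, 0.0, 0.2]),
    "Spain":  np.array([0.1, 0.7, 0.2]),
}

def cosine(u, v):
    return float(u @ v / (np.linalg.norm(u) * np.linalg.norm(v)))

# capital - country + new country should land near the new capital
query = emb["Paris"] - emb["France"] + emb["Spain"]
print(max(emb, key=lambda w: cosine(emb[w], query)))  # -> "Madrid" with these toy vectors
```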
Linear Relational Embeddings (LREs) Hernandez et al. (2024) use a particular class of linear representation called a Linear Relational Embedding (Paccanaro & Hinton, 2001) to approximate, as an affine transformation, the computation performed by a model to predict the objects that complete common subject-relation-object triplets. This transform is calculated from a hidden state $\mathbf{s}$, the subject token representation at some middle layer of the model, to $\mathbf{o}$, the hidden state at the last token position and layer of the model (i.e., the final hidden state that decodes a token in an autoregressive transformer) within a natural language description of the relation. For example, given the input sequence "Miles Davis (subject) plays the (relation)", the goal is to approximate the computation of the object "trumpet", assuming the model predicts the object correctly. It was found that, for some relations, this transformation holds for nearly every subject and object in the relation set (such as "Cat Stevens plays the guitar"). This is surprising because, despite the nonlinearities within the many layers and token positions separating $\mathbf{s}$ and $\mathbf{o}$, a simple structure within the representation space well approximates the model's prediction process for a number of factual relations. In this work we study LREs under the same definition and experimental setup, because this allows us to predefine the concepts we want to search for (e.g., factual relations), as well as use a handful of representations to relate thousands of terms in the dataset by learning linear representations on a per-relation level.

Hernandez et al. calculate LREs as a first-order Taylor series approximation of an LM's computation. Let $F(\mathbf{s}, c) = \mathbf{o}$ be the forward pass through a model that produces object representation $\mathbf{o}$ given subject representation $\mathbf{s}$ and a few-shot context $c$; this computation is approximated as $F(\mathbf{s}, c) \approx W\mathbf{s} + b = F(\mathbf{s}_i, c) + W(\mathbf{s} - \mathbf{s}_i)$, expanded about a specific subject $\mathbf{s}_i$. Hernandez et al. propose to compute $W$ and $b$ using the average of $n$ examples from the relation ($n = 8$ here), with $\frac{\partial F}{\partial \mathbf{s}}$ denoting the Jacobian matrix of $F$:

$$
W = \mathbb{E}_{\mathbf{s}_i, c_i}\left[ \left. \frac{\partial F}{\partial \mathbf{s}} \right|_{(\mathbf{s}_i, c_i)} \right] \quad \text{and} \quad b = \mathbb{E}_{\mathbf{s}_i, c_i}\left[ F(\mathbf{s}, c) - \left. \frac{\partial F}{\partial \mathbf{s}} \right|_{(\mathbf{s}_i, c_i)} \mathbf{s} \right] \tag{1}
$$

In practice, LREs are estimated using hidden states from LMs during the processing of the test example in a few-shot setup. For a relation like "instrument-played-by-musician", the model may see four examples (in the form "[X] plays the [Y]") and on the fifth example, when predicting e.g., "trumpet" from "Miles Davis plays the", the subject representation $\mathbf{s}$ and object representation $\mathbf{o}$ are extracted.

# 2.2 INFERRING TRAINING DATA FROM MODELS

There has been significant interest recently in understanding the extent to which it is possible to infer the training data of a fully trained neural network, including LMs, predominantly by performing membership inference attacks (Shokri et al., 2017; Carlini et al., 2022), judging memorization of text (Carlini et al., 2023; Oren et al., 2024; Shi et al., 2024), or inferring the distribution of data sources (Hayase et al., 2024; Ateniese et al., 2015; Suri & Evans, 2022). Our work is related in that we find hints of the pretraining data distribution in the model itself, but we focus on how linear structure in the representations relates to training data statistics.

# 3 METHODS

Our analysis is twofold: counts of terms in the pretraining corpus of LMs, and measurements of how well factual relations are approximated by affine transformations. We use the OLMo model v1.7 (0424 7B and 0724 1B) (Groeneveld et al., 2024) and GPT-J (6B) (Wang & Komatsuzaki, 2021) and their corresponding datasets: Dolma (Soldaini et al., 2024) and the Pile (Gao et al., 2020), respectively. To understand how these features form over training time, we test eight model checkpoints throughout training in the OLMo family of models (Groeneveld et al., 2024).

# 3.1 LINEAR RELATIONAL EMBEDDINGS (LRES) IN LMS

We use a subset of the RELATIONS dataset (Hernandez et al., 2024), focusing on the 25 factual relations of the dataset, such as capital-city and person-mother (complete list in Appendix B). Across these relations, there are 10,488 unique subjects and objects. Following Hernandez et al. (2024), we fit an LRE for each relation on 8 examples from that relation, each with a 5-shot prompt. We use the approach from this work as described in Section 2.1.

Fitting LREs Hernandez et al. (2024) find that Equation 1 underestimates the optimal slope of the linear transformation, so they scale each relation's $W$ by a scalar hyperparameter $\beta$. Unlike the original work, which finds one $\beta$ per model, we use one $\beta$ per relation, as this avoids disadvantaging specific relations. Another difference in our calculation of LREs is that we do not impose the constraint that the model has to predict the answer correctly for an example to be used as one of the 8 examples that approximate the Jacobian matrix. Interestingly, using examples that models predict incorrectly to fit Equation 1 works as well as using only correct examples.
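As a concrete illustration of Equation 1, here is a minimal sketch that fits an LRE to a toy stand-in for $F$. In the real setting, $F$ is the LM's forward pass from the subject's mid-layer hidden state to the final-position output state; the toy map and the $\beta$ value below are placeholders, not the paper's released implementation or tuned hyperparameters.

```python
# Minimal LRE fit per Eq. (1) on a toy stand-in for the LM's computation F(s).
import torch

torch.manual_seed(0)
d = 32                                   # toy hidden size
M = torch.randn(d, d) / d ** 0.5         # parameters of a nonlinear stand-in map

def F(s: torch.Tensor) -> torch.Tensor:  # stand-in for the LM's F(s, c)
    return torch.tanh(s @ M) + 0.5 * s

subjects = [torch.randn(d) for _ in range(8)]   # n = 8 relation examples

# Eq. (1): W is the mean Jacobian dF/ds over subjects; b is the mean
# first-order intercept F(s_i) - J_i s_i.
Js = [torch.autograd.functional.jacobian(F, s) for s in subjects]
W = torch.stack(Js).mean(0)
b = torch.stack([F(s) - J @ s for J, s in zip(Js, subjects)]).mean(0)

beta = 2.5   # per-relation scale on W (Sec. 3.1); arbitrary value here

def lre(s: torch.Tensor) -> torch.Tensor:  # the learned affine approximation
    return beta * W @ s + b

s_new = torch.randn(d)
print(torch.nn.functional.cosine_similarity(lre(s_new), F(s_new), dim=0))
```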
We opt to use this variant as it allows us to compare different checkpoints and models (§4) with linear transformations trained on the same 8 examples, despite the fact that the models make different predictions on these instances. We explore the effect of example choice in Appendix B and find that it does not make a significant difference. We also explore the choice of layer in Appendix C.

Metrics To evaluate the quality of LREs, Hernandez et al. (2024) introduce two metrics that measure the quality of the learned transformations. Faithfulness measures whether the transformation learned by the LRE produces the same object token prediction as the original LM. Causality measures the proportion of the time a prediction of an object can be changed to the output of a different example from the relation (e.g., editing the Miles Davis subject representation so that the LM predicts he plays the guitar, instead of the trumpet). For specifics on implementation, we refer the reader to Hernandez et al. (2024). We consider an LRE to be high 'quality' when it scores highly on these metrics, as this measures whether an LRE works across subject-object pairs within the relation. In general, we prefer causality in our analysis, as faithfulness can be high when LMs predict the same token very often (as in early checkpoints).

# 3.2 COUNTING FREQUENCIES THROUGHOUT TRAINING

A key question we explore is how term frequencies affect the formation of linear representations. We hypothesize that more commonly occurring relations will lead to higher quality LREs for those relations. Following Elsahar et al. (2018) and Elazar et al. (2022), we count an occurrence of a relation whenever a subject and object co-occur. While term co-occurrence is used as a proxy for the frequency of the entire triplet being mentioned in text, Elsahar et al. (2018) show that this approximation is quite accurate. We now discuss how to compute these co-occurrence counts.

What's in My Big Data? (WIMBD) Elazar et al. (2024) index many popular pretraining datasets, including Dolma (Soldaini et al., 2024) and the Pile (Gao et al., 2020), and provide search tools that allow for counting individual terms and co-occurrences within documents. However, this only gives us counts for the full dataset. Since we are interested in counting term frequencies throughout pretraining, we count these within training batches of OLMo instead. When per-batch counts are not available, WIMBD offers a good approximation for final checkpoints, which is what we do in the case of GPT-J. We compare WIMBD co-occurrence counts to the Batch Search method (described below) for the final checkpoint of OLMo in Appendix D and find that the counts are extremely close: the slope of the best-fit line for BatchCount against WIMBDCount is 0.94, as WIMBD slightly overestimates co-occurrences by counting over whole documents rather than training sequences.

Batch Search Data counting tools cannot typically provide accurate counts for model checkpoints at arbitrary training steps. Thus, we design a tool to efficiently count exact co-occurrences within sequences of tokenized batches. This also gives us the advantage of counting in a way that is faithful to how LMs are trained: since LMs are trained on batches of fixed length, which often split documents into multiple sequences, miscounts may occur unless tokenized sequences are used. Using this method, we note every time one of our 10k terms appears throughout a dataset used to pretrain an LM.
We count a co-occurrence as any time two terms appear in the same sequence within a batch (a (batch-size, sequence-length) array). We search 10k terms in the approximately 2T tokens of Dolma (Soldaini et al., 2024) this way. Using our implementation, we are able to complete this on 900 CPUs in about a day. To support future work, we release our code as Cython bindings that integrate out of the box with existing libraries.
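The released tool is implemented in Cython for throughput; the following pure-NumPy sketch shows only the counting logic described above, on a toy tokenized batch with made-up term-to-token-ID mappings.

```python
# Simplified sequence-level co-occurrence counting (toy stand-in for the
# released Cython tool; term token IDs and batch contents are made up).
import numpy as np

terms = {"john lennon": [1001, 1002], "the beatles": [2001, 2002, 2003]}

def sequence_contains(seq: np.ndarray, tok_ids: list) -> bool:
    """True if the token-ID subsequence appears contiguously in seq."""
    k = len(tok_ids)
    if k > len(seq):
        return False
    windows = np.lib.stride_tricks.sliding_window_view(seq, k)
    return bool((windows == np.array(tok_ids)).all(axis=1).any())

def count_cooccurrences(batch: np.ndarray, a: str, b: str) -> int:
    """Count sequences in a (batch_size, seq_len) array containing both terms."""
    return sum(
        sequence_contains(seq, terms[a]) and sequence_contains(seq, terms[b])
        for seq in batch
    )

batch = np.random.randint(0, 50_000, size=(4, 64))  # toy tokenized batch
batch[0, 10:12] = terms["john lennon"]               # plant one co-occurrence
batch[0, 30:33] = terms["the beatles"]
print(count_cooccurrences(batch, "john lennon", "the beatles"))  # -> 1
```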
OLMo-7B 0424 Development of LREs over Training Time
![](images/992f1844b981d1812454b47d5c56dafde0f907001c4704149adb3c14d065df3a.jpg)

| Model | Co-Occurrence Threshold (Mean Causality > 0.9) |
| --- | --- |
| GPT-J (6B) | 1,097 |
| OLMo-7B | 1,998 |
| OLMo-1B | 4,447 |
![](images/ad620ec86229e5de638a565490e5d6940af4a72fde26a3c5f92185684ac231db.jpg)
OLMo-1B 0724 Development of LREs over Training Time

GPT-J Development of LREs over Training Time
![](images/1cd813e2fb83f1b4351e3155ec2db6833f96e31dd0f85d6bd6bd9a6cf6a6115e.jpg)
Figure 2: We find that LREs have consistently high causality scores across relations after some average frequency threshold is reached (table, top right). In OLMo models, red dots show the model's LRE performance at 41B tokens (10k steps), and blue dots show the final checkpoint performance (550k steps in 7B); gray dots show intermediate checkpoints. Even at very early training steps, if the average subject-object co-occurrence count is high enough, the models are very likely to already have robust LREs formed in the representation space. Symbols represent different relations; highlighted relations are shown in darker lines.

# 4 FREQUENCY OF SUBJECT-OBJECT CO-OCCURRENCES ALIGNS WITH EMERGENCE OF LINEAR REPRESENTATIONS

In this section, we explore when LREs begin to appear at training time and how this is related to pretraining term frequencies. Our main findings are that (1) average co-occurrence frequency within a relation strongly correlates with whether an LRE will form; and (2) the frequency effect is independent of the pretraining stage: if the average subject-object co-occurrence count for a relation surpasses some threshold, it is very likely to have a high-quality LRE, even at early pretraining steps.

# 4.1 SETUP

Using the factual recall relations from the Hernandez et al. (2024) dataset, we use the Batch Search method (§3.2) to count subject and object co-occurrences within the sequences of Dolma (Soldaini et al., 2024) used to train the OLMo-1B (v. 0724) and 7B (v. 0424) models (Groeneveld et al., 2024). The OLMo family of models provides tools for accurately recreating the batches from Dolma, which allow us to reconstruct the data the way the model was trained. We also use GPT-J (Wang & Komatsuzaki, 2021), with the Pile (Gao et al., 2020) as its training data; since we do not have access to the exact batches used to train it, we use WIMBD (Elazar et al., 2024) to count subject-object co-occurrences over the entire dataset. We fit LREs on each relation and model separately. Hyperparameter sweeps are in Appendix C. OLMo also releases intermediate checkpoints, which we use to track development over pretraining time. We use checkpoints that have seen {41B, 104B, 209B, 419B, 628B, 838B, 1T, and 2T} tokens.$^{3}$ We use the Pearson coefficient for measuring correlation.

# 4.2 RESULTS

Our results are summarized in Figure 2. We report training tokens because the step count differs between 7B and 1B. Co-occurrence frequencies highly correlate with causality ($r = 0.82$). This is notably higher than the correlations with subject frequencies ($r = 0.66$) and object frequencies ($r = 0.59$), for both OLMo-7B and OLMo-1B.

We consider a causality score above 0.9 to be nearly perfectly linear. The table in Figure 2 shows the co-occurrence counts above which average causality exceeds 0.9; these thresholds are shown as dashed black lines on the scatterplots. Regardless of pretraining step, models that surpass this threshold have very high causality scores. Although we cannot draw conclusions from only three models, it is possible that scale also affects this threshold: OLMo-7B and GPT-J (6B params) require far less exposure than OLMo-1B.
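A small sketch of this analysis on placeholder numbers (not the paper's measurements) follows; taking the correlation over log counts is one plausible reading, since Figure 2 plots counts on a log axis.

```python
# Correlate per-relation average co-occurrence counts with causality and find
# the count threshold above which mean causality exceeds 0.9 (toy data).
import numpy as np

avg_cooc  = np.array([21, 150, 900, 2000, 5000, 12000, 40000], dtype=float)
causality = np.array([0.44, 0.55, 0.78, 0.91, 0.93, 0.96, 0.97])

r = np.corrcoef(np.log(avg_cooc), causality)[0, 1]  # Pearson r on log counts
print(f"Pearson r = {r:.2f}")

# Smallest count c such that relations with avg_cooc >= c average causality > 0.9.
for c in np.sort(avg_cooc):
    if causality[avg_cooc >= c].mean() > 0.9:
        print(f"co-occurrence threshold ~ {int(c)}")
        break
```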
# 4.3 RELATIONSHIP TO ACCURACY

Increased frequency (or a proxy for it) was shown to lead to better factual recall in LMs (Chang et al., 2024; Mallen et al., 2023). However, it remains unknown whether high accuracy entails the existence of a linear representation. Such a finding would inform when we expect an LM to achieve high accuracy on a task. We find that the correlation between causality and subject-object frequency is higher than that with 5-shot accuracy (0.82 vs. 0.74 in OLMo-7B), though both are clearly high. In addition, there are a few examples of high-accuracy relations that do not form single consistent LREs. These relations are typically low frequency, such as star-constellation-name, which has $84\%$ 5-shot accuracy but only $44\%$ causality (OLMo-7B), with subjects and objects co-occurring only about 21 times on average across the full dataset. In general, few-shot accuracy closely tracks causality, consistent with arguments that in-context learning allows models to identify linear mappings between input-output pairs (Hendel et al., 2023; Garg et al., 2022). We find that causality increases first in some cases, like "food-from-country" having a causality of $65\%$ but a 5-shot accuracy of only $42\%$; this gap is consistently closed through training. In the final model, causality and 5-shot accuracy are within $11\%$ on average. We report the relationship between every relation, zero-shot, and few-shot accuracy for OLMo models across training in Appendix F.

A fundamental question in the interpretability community is under what circumstances linear structures form. While previous work has shown that the training objective encourages this type of representation (Jiang et al., 2024), our results suggest that the reason some concepts form a linear representation while others do not is strongly related to pretraining frequency.

# 5 LINEAR REPRESENTATIONS HELP PREDICT PRETRAINING CORPUS FREQUENCIES

In this section, we aim to understand this relationship further by exploring what linearity in LM representations can tell us about pretraining term frequency. We target the challenging problem of predicting how often a term, or a co-occurrence of terms, appears in an LM's training data from the representations alone. Such a prediction model can be useful, if it generalizes, when applied to models whose weights are open but whose data is closed. For instance, such a model could tell us whether a model was trained on specific domains (e.g., Java code) by measuring the presence of relevant LREs. First, we show that LRE features encode information about frequency that is not present in probabilities alone. Then, we show how a regression fit on one model generalizes to the features extracted from another, without any information about the new model's counts.

# 5.1 EXPERIMENTAL SETUP

We fit a regression to the Relations dataset (Hernandez et al., 2024) using OLMo-7B LRE features and log probabilities. We fit 24 models such that each relation is held out once per random seed, across 4 seeds. We train a random forest regression model with 100 decision tree estimators to predict the frequency of terms (either the subject-object frequency or the object frequency alone; e.g., predicting "John Lennon" and "The Beatles" or just "The Beatles") from one of two sets of features. Our baseline set of features is based on the likelihood of recalling a fact.
Given some few-shot context from the relations dataset ("John Lennon is a lead singer of"), we extract the log probability of the correct answer, as well as the average accuracy on this prompt across 5 trials. The intuition is that models will be more confident about highly frequent terms. The other set of features includes the first, as well as faithfulness and causality measurements.

![](images/e7c13d5cff0901794019ccc7b0ddce6b894588c779c409326c18dc7019fafaf9.jpg)
Figure 3: Within-Magnitude accuracy (i.e., the proportion of predictions within one order of magnitude of the ground truth) for models predicting object and subject-object co-occurrences in heldout relations. Using LRE features outperforms LM-only features by about $30\%$. We find that it is much easier to predict object frequencies; the subject-object prediction models with LRE features only marginally outperform baseline performance.

We use Faithfulness and Causality as defined in Hernandez et al. (2024), as well as two other metrics: Faith Prob., which is the log probability of the correct answer as produced by an LRE, and Hard Causality, which is the same as the "soft" variant but counts only the proportion of times the causality edit produces the target answer as the number-one prediction. We use every example from the relations for which there is more than one object occurrence or subject-object co-occurrence. We do not provide an explicit signal for which relation an example comes from, but because subjects/objects tend to have similar frequencies within a relation, we train multiple models, evaluate on held-out relations, and average performance. In all settings, the held-out set's objects and relations are guaranteed to not have been in the training set.
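A minimal sketch of this regression setup on synthetic feature rows is below; the feature ordering, the data, and the split are stand-ins for the actual pipeline, and the within-magnitude metric it reports is the one defined in the next section.

```python
# Random-forest frequency regression on synthetic LRE + LM features.
import numpy as np
from sklearn.ensemble import RandomForestRegressor

rng = np.random.default_rng(0)
n = 500
# columns: [log_prob, accuracy, faithfulness, faith_prob, causality, hard_causality]
X = rng.random((n, 6))
y = 3 + 10 * X[:, 5] + rng.normal(0, 1, n)   # synthetic natural-log frequency target

relation_id = rng.integers(0, 24, n)          # 24 relations for held-out splits
train, test = relation_id != 0, relation_id == 0   # hold out one relation

reg = RandomForestRegressor(n_estimators=100, random_state=0)
reg.fit(X[train], y[train])
pred = reg.predict(X[test])

# "Within one order of magnitude" on natural-log targets: |pred - y| <= ln(10).
within = np.abs(pred - y[test]) <= np.log(10)
print(f"within-magnitude accuracy: {within.mean():.2f}")
```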
# 5.2 LRE METRICS ENCODE FINE-GRAINED FREQUENCY INFORMATION

Because of the difficulty of predicting the exact number of occurrences, we report accuracy within one order of magnitude of the ground truth. This measures whether the predicted value is within a reasonable range of the actual value. Results are shown in Figure 3. We find that language modeling features do not provide any meaningful signal towards predicting object or subject-object frequencies, and are only marginally above the baseline of predicting the average or random frequencies from the training data. On object frequency predictions, we find that LRE features encode a strong signal, allowing for accurate predictions about $70\%$ of the time. Mean absolute error of the predictions (in natural log space) for LRE features (LM-only features) is 2.1 (4.2) and 1.9 (2.3) on the object prediction and subject-object prediction tasks, respectively. We find that subject-object co-occurrence frequency is likely too difficult to predict given the signals we have here, as our predictions are higher than, but within one standard deviation of, the mean baseline.

Feature Importance: How important are LRE features for predicting the frequency of an item? We perform feature permutation tests to see how much each feature (LRE features and log probs) contributes to the final answer. First, we check which features used to fit the regression are correlated, since, if they are, perturbing one will leave the signal present in another. In Appendix E, we show that only faithfulness and faith probability are strongly correlated, so for this test only, we train models with a single PCA component representing $89\%$ of the variance of those two features. We find that hard causality is by far the most important feature for generalization performance, causing a difference of about $15\%$ in accuracy, followed by the faithfulness measures at about $5\%$, providing evidence that the LRE features encode an important signal.

# 5.3 GENERALIZATION TO A NEW LM

Next, we test the ability of the regression fit on one LM to generalize to another, without requiring further supervision. If such a model generalizes, we can predict term counts for models whose pretraining data we cannot access. We keep the objective the same and apply the regression model, fit for example on OLMo (the "Train OLMo" setting), to features extracted from GPT-J, using ground truth counts from the Pile (and vice versa, i.e., the "Train GPT-J" setting).

Table 1: Within-Magnitude accuracy for different settings of train and test models. Overall, we find that fitting a regression on one model's LREs and evaluating on the other provides a meaningful signal compared to fitting using only log probability and task performance, or predicting the average training data frequency. The metric here is the proportion of predictions within one order of magnitude (10x) of the ground truth. Here, "Eval. on GPT-J" means the regression is fit on OLMo and evaluated on GPT-J.
| Model | Object Occs. (Eval. on GPT-J) | Object Occs. (Eval. on OLMo) | Subject-Object Co-Occs. (Eval. on GPT-J) | Subject-Object Co-Occs. (Eval. on OLMo) |
| --- | --- | --- | --- | --- |
| LRE Features | 0.65±0.12 | 0.49±0.12 | 0.76±0.12 | 0.68±0.08 |
| LogProb Features | 0.42±0.10 | 0.41±0.09 | 0.66±0.09 | 0.60±0.07 |
| Mean Freq. Baseline | 0.31±0.15 | 0.41±0.17 | 0.57±0.15 | 0.67±0.16 |
We again train a random forest regression model to predict the frequency of terms (either the subject-object frequency or the object frequency alone; e.g., predicting "John Lennon" and "The Beatles" or just "The Beatles") on features from one of two models: either OLMo-7B (final checkpoint) or GPT-J, treating the other as the 'closed' model. We test the hypothesis that LRE features (faithfulness, causality) are useful in predicting term frequencies across different models, with the hope that this could be applied to dataset inference methods in the future, where access to the ground truth pretraining data counts is limited or unavailable.

Results Our results are presented in Table 1. First, we find that there is a signal in the LRE features that does not exist in the log probability features: we are able to fit a much more generalizable model when using LRE features as opposed to the LM probabilities alone. Second, evaluating on the LRE features of a held-out model (scaled by the ratio of total tokens trained between the two models) maintains around the same accuracy when fit on exact counts from OLMo, allowing us to predict occurrences without access to the GPT-J pretraining data. We find that predicting the subject-object co-occurrences using LREs alone is barely better than the baseline. This task is much more difficult than predicting the frequency of the object alone, but our model may also simply be unable to account for outliers in the data, which is tightly clustered around the mean (thus giving the high mean baseline performance of between approx. $60-70\%$). Nevertheless, we show that linear structure for relations within LM representations encodes a rich signal representing dataset frequency.

# 5.4 ERROR ANALYSIS

In Table 2 we show example predictions from our regression model, fit on OLMo and evaluated on heldout relations with LREs measured on GPT-J. We find that some relations transfer more easily than others, with star-constellation-name transferring especially poorly. In general, the regression transfers well, without performance deteriorating much (about $5\%$ in accuracy; see Figure 3 compared to the evaluation on GPT-J in Table 1), suggesting LREs encode information in a consistent way across models. We also find that the regression makes use of the full prediction range, producing values in the millions (see Table 2) as well as in the tens; the same regression shown in the table also predicts 59 occurrences for "Caroline Bright" (Will Smith's mother), where the ground truth is 48.

# 6 DISCUSSION

Connection to Factual Recall Work in interpretability has focused largely on linear representations in recent years, and our work aims to address the open question of the conditions under which they form. We find that coherent linear representations form when the relevant terms (in this case, subject-object co-occurrences) appear in pretraining at a consistent enough rate. Analogously, Chang et al. (2024) show that repeated exposure encourages higher retention of facts. Future work could investigate the connection between factual recall accuracy and linear representations.

Table 2: Examples of a regression fit on OLMo LRE metrics and evaluated on GPT-J on heldout relations, demonstrating common error patterns: 1.
Predictions are better for relations that are closer to those seen when fitting the regression (country-related relations); 2. Some relations, like star-constellation-name, perform very poorly, possibly due to low frequency; 3. The regression model can be sensitive to the choice of subject (e.g., William vs. Harry), telling us that the choice of data used to measure LREs is important for predictions.
**Predicting Object Frequency in GPT-J, Regression fit on OLMo**

| Relation | Subject | Object | Prediction | Ground Truth | Error |
| --- | --- | --- | --- | --- | --- |
| landmark-in-country | Menangle Park | Australia | 2,986,989 | 3,582,602 | 1.2x |
| country-language | Brazil | Portuguese | 845,406 | 561,005 | 1.5x |
| star-constellation name | Arcturus | Boötes | 974,550 | 2,817 | 346x |
| person-mother | Prince William | Princess Diana | 5,826 | 27,094 | 4.6x |
| person-mother | Prince Harry | Princess Diana | 131 | 27,094 | 207x |
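To make the setup concrete, the following is a minimal sketch of the cross-model frequency regression (our illustration, not the released code; the function names, feature layout, and token-ratio handling are our assumptions, and scikit-learn is assumed):

```python
# Sketch of the cross-model frequency regression described above.
# Assumed setup: rows of the feature matrix hold per-example LRE metrics
# (faithfulness, causality) plus LM log probabilities; targets are
# ground-truth counts from the 'open' model's pretraining data.

import numpy as np
from sklearn.ensemble import RandomForestRegressor

def fit_frequency_regressor(train_features: np.ndarray,
                            train_counts: np.ndarray) -> RandomForestRegressor:
    # Log-transform counts so the regressor predicts orders of magnitude
    # rather than raw counts, which span several orders.
    return RandomForestRegressor(random_state=0).fit(
        train_features, np.log1p(train_counts))

def predict_closed_model_counts(model: RandomForestRegressor,
                                heldout_features: np.ndarray,
                                token_ratio: float) -> np.ndarray:
    # One simple way to apply the token-ratio adjustment mentioned above:
    # rescale predicted counts by the ratio of total training tokens
    # between the two models (a simplification for illustration; the paper
    # applies the scaling when evaluating on the held-out model's features).
    return np.expm1(model.predict(heldout_features)) * token_ratio
```

In the "Train OLMo" setting, `train_features` would come from OLMo-7B with counts from its pretraining data, and `heldout_features` from GPT-J, whose Pile counts are used only for evaluation.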
+ +**Linear Representations in LMs** The difficulty of disentangling the formation of linear representations from increases in relation accuracy, especially in the few-shot case, is interesting. Across 24 relations, only the "star-constellation-name" and "product-by-company" relations have few-shot accuracies that far exceed their causality scores (and both are low frequency). Thus, it is still an open question how LMs are able to solve these tasks. The fact that few-shot accuracy and causality seem so closely linked is consistent with findings that ICL involves locating the right task (Min et al., 2022) and applying a 'function' to map input examples to outputs (Hendel et al., 2023; Todd et al., 2024). The finding that frequency controls this ability is perhaps unsurprising, as frequency also controls the emergence of this linear structure in static embeddings (Ethayarajh et al., 2019). Jiang et al. (2024) prove that a strong frequency-based condition (based on matched log-odds between subjects and objects) and, when the frequency condition is not met, an implicit bias of gradient descent encourage linearity in LLMs; our work empirically shows the conditions under which linear representations tend to form in more realistic settings. If LMs are 'only' solving factual recall or performing ICL through linear structures, it is surprising how well this works at scale, but the simplicity also provides a promising way to understand LMs and ICL in general. An interesting avenue for future work would be to understand if and when LMs use a method that is not well approximated linearly to solve these types of tasks, as recent work has shown non-linearity can be preferred for some tasks in recurrent networks (Csordás et al., 2024). + +**Future Work in Predicting Dataset Frequency** The ability to predict the contents of pretraining data is important for investigating memorization, contamination, and the privacy of information used to train models. In our approach, we show it is possible to extract pretraining data signal without direct supervision. Without interpretability work on the nature of representations in LMs, we would not know of this implicit dataset signal, and we argue that interpretability can generate useful insights more broadly as well. Extensions of this work could incorporate more information to tighten the prediction bounds on frequency, such as extracting additional features from the tokenizer (Hayase et al., 2024). We hope this work encourages future research into other ways in which properties of pretraining data affect LM representations, both for improving and for better understanding these models. + +# 7 CONCLUSION + +We find a connection between linear representations of subject-relation-object factual triplets in LMs and the pretraining frequencies of the subjects and objects in those relations. This finding can guide future interpretability work in deciphering whether a linear representation for a given concept will exist in a model, since we observe that frequencies below a certain threshold for a given model will not yield LREs (a particular class of linear representation). From there, we show that we can use the presence of linear representations to predict, with some accuracy, the frequency of terms in the pretraining corpus of an open-weights, closed-data model without supervision. Future work could aim to improve on our bounds of predicted frequencies. Overall, our work presents a meaningful step towards understanding the interactions between pretraining data and internal LM representations. 
+ +# ACKNOWLEDGMENTS + +This work was performed while JM was an intern at Ai2. We thank the anonymous reviewers and members of the Aristo and AllenNLP teams at Ai2 for valuable feedback. + +# REFERENCES + +Viraat Aryabumi, Yixuan Su, Raymond Ma, Adrien Morisot, Ivan Zhang, Acyr Locatelli, Marzieh Fadaee, Ahmet Üstün, and Sara Hooker. To code or not to code? exploring impact of code in pretraining. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=zSfeN1uAcx. +Giuseppe Ateniese, Luigi V Mancini, Angelo Spognardi, Antonio Villani, Domenico Vitali, and Giovanni Felici. Hacking smart machines with smarter ones: How to extract meaningful data from machine learning classifiers. International Journal of Security and Networks, 10(3):137-150, 2015. URL https://dl.acm.org/doi/10.1504/IJSN.2015.071829. +Sid Black, Lee Sharkey, Leo Grinsztajn, Eric Winsor, Dan Braun, Jacob Merizian, Kip Parker, Carlos Ramón Guevara, Beren Millidge, Gabriel Alfour, and Connor Leahy. Interpreting neural networks through the polytope lens, 2022. URL https://arxiv.org/abs/2211.12312. +Nicholas Carlini, Steve Chien, Milad Nasr, Shuang Song, Andreas Terzis, and Florian Tramèr. Membership inference attacks from first principles. In 2022 IEEE Symposium on Security and Privacy (SP), pp. 1897-1914, 2022. URL https://ieeexplore.ieee.org/document/9833649/. +Nicholas Carlini, Daphne Ippolito, Matthew Jagielski, Katherine Lee, Florian Tramèr, and Chiyuan Zhang. Quantifying memorization across neural language models. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=TatRHT_1cK. +Hoyeon Chang, Jinho Park, Seonghyeon Ye, Sohee Yang, Youngkyung Seo, Du-Seong Chang, and Minjoon Seo. How do large language models acquire factual knowledge during pretraining? In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. URL https://openreview.net/forum?id=TYdzj1EvBP. +David Chanin, Anthony Hunter, and Oana-Maria Camburu. Identifying Linear Relational Concepts in Large Language Models. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 1524-1535. Association for Computational Linguistics, 2024. doi: 10.18653/v1/2024.naacl-long.85. URL https://aclanthology.org/2024.naacl-long.85. +Róbert Csordás, Christopher Potts, Christopher D Manning, and Atticus Geiger. Recurrent neural networks learn to store and generate sequences using non-linear representations. In Yonatan Belinkov, Najoung Kim, Jaap Jumelet, Hosein Mohebbi, Aaron Mueller, and Hanjie Chen (eds.), Proceedings of the 7th BlackboxNLP Workshop: Analyzing and Interpreting Neural Networks for NLP, pp. 248-262, Miami, Florida, US, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.blackboxnlp-1.17. URL https://aclanthology.org/2024.blackboxnlp-1.17/. +Yanai Elazar, Shauli Ravfogel, Alon Jacovi, and Yoav Goldberg. Amnesic Probing: Behavioral Explanation with Amnesic Counterfactuals. Transactions of the Association for Computational Linguistics, 9:160-175, 03 2021. URL https://doi.org/10.1162/tacl_a_00359. +Yanai Elazar, Nora Kassner, Shauli Ravfogel, Amir Feder, Abhilasha Ravichander, Marius Mosbach, Yonatan Belinkov, Hinrich Schütze, and Yoav Goldberg. Measuring causal effects of data statistics on language model's 'factual' predictions. 
arXiv preprint arXiv:2207.14251, 2022. URL https://arxiv.org/abs/2207.14251. + +Yanai Elazar, Akshita Bhagia, Ian Helgi Magnusson, Abhilasha Ravichander, Dustin Schwenk, Alane Suhr, Evan Pete Walsh, Dirk Groeneveld, Luca Soldaini, Sameer Singh, Hannaneh Hajishirzi, Noah A. Smith, and Jesse Dodge. What's in my big data? In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=RvfPnOkPV4. +Nelson Elhage, Neel Nanda, Catherine Olsson, Tom Henighan, Nicholas Joseph, Ben Mann, Amanda Askell, Yuntao Bai, Anna Chen, Tom Conerly, et al. A mathematical framework for transformer circuits. Transformer Circuits Thread, 2021. URL https://transformer-circuits.pub/2021/framework/index.html. +Hady Elsahar, Pavlos Vougiouklis, Arslen Remaci, Christophe Gravier, Jonathon Hare, Frederique Laforest, and Elena Simperl. T-REx: A large scale alignment of natural language with knowledge base triples. In Nicoletta Calzolari, Khalid Choukri, Christopher Cieri, Thierry Declerck, Sara Goggi, Koiti Hasida, Hitoshi Isahara, Bente Maegaard, Joseph Mariani, Hélène Mazo, Asuncion Moreno, Jan Odijk, Stelios Piperidis, and Takenobu Tokunaga (eds.), Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan, May 2018. European Language Resources Association (ELRA). URL https://aclanthology.org/L18-1544. +Kawin Ethayarajh, David Duvenaud, and Graeme Hirst. Towards Understanding Linear Word Analogies. In Anna Korhonen, David Traum, and Lluís Márquez (eds.), Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 3253-3262. Association for Computational Linguistics, 2019. doi: 10.18653/v1/P19-1315. URL https://aclanthology.org/P19-1315. +Leo Gao, Stella Biderman, Sid Black, Laurence Golding, Travis Hoppe, Charles Foster, Jason Phang, Horace He, Anish Thite, Noa Nabeshima, et al. The pile: An 800gb dataset of diverse text for language modeling. arXiv preprint arXiv:2101.00027, 2020. URL https://arxiv.org/abs/2101.00027. +Leo Gao, Tom Dupre la Tour, Henk Tillman, Gabriel Goh, Rajan Troll, Alec Radford, Ilya Sutskever, Jan Leike, and Jeffrey Wu. Scaling and evaluating sparse autoencoders. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=tcsZt9ZNKD. +Shivam Garg, Dimitris Tsipras, Percy Liang, and Gregory Valiant. What can transformers learn in-context? a case study of simple function classes. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=f1NZJ2eOet. +Anna Gladkova, Aleksandr Drozd, and Satoshi Matsuoka. Analogy-based detection of morphological and semantic relations with word embeddings: what works and what doesn't. In Jacob Andreas, Eunsol Choi, and Angeliki Lazaridou (eds.), Proceedings of the NAACL Student Research Workshop, pp. 8-15, San Diego, California, June 2016. Association for Computational Linguistics. doi: 10.18653/v1/N16-2002. URL https://aclanthology.org/N16-2002/. +Dirk Groeneveld, Iz Beltagy, Evan Walsh, Akshita Bhagia, Rodney Kinney, Oyvind Tafjord, Ananya Jha, Hamish Ivison, Ian Magnusson, Yizhong Wang, et al. OLMo: Accelerating the science of language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 15789-15809, Bangkok, Thailand, August 2024. 
Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.841. URL https://aclanthology.org/2024.acl-long.841/. +Jonathan Hayase, Alisa Liu, Yejin Choi, Sewoong Oh, and Noah A. Smith. Data mixture inference: What do BPE tokenizers reveal about their training data? In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. URL https://openreview.net/forum?id=EHXyeImux0. +Roee Hendel, Mor Geva, and Amir Globerson. In-Context Learning Creates Task Vectors. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational Linguistics: EMNLP 2023, pp. 9318-9333. Association for Computational Linguistics, 2023. doi: 10.18653/v1/2023.findings-emnlp.624. URL https://aclanthology.org/2023.findings-emnlp.624. +Evan Hernandez, Arnab Sen Sharma, Tal Haklay, Kevin Meng, Martin Wattenberg, Jacob Andreas, Yonatan Belinkov, and David Bau. Linearity of relation decoding in transformer language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=w7LU2s14kE. +Robert Huben, Hoagy Cunningham, Logan Riggs Smith, Aidan Ewart, and Lee Sharkey. Sparse autoencoders find highly interpretable features in language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=F76bwRSLeK. +Yibo Jiang, Goutham Rajendran, Pradeep Kumar Ravikumar, Bryon Aragam, and Victor Veitch. On the origins of linear representations in large language models. In Forty-first International Conference on Machine Learning, 2024. URL https://openreview.net/forum?id=otuTw4Mghk. +Marzena Karpinska, Bofang Li, Anna Rogers, and Aleksandr Drozd. Subcharacter information in Japanese embeddings: When is it worth it? In Georgiana Dinu, Miguel Ballesteros, Avirup Sil, Sam Bowman, Wael Hamza, Anders Søgaard, Tahira Naseem, and Yoav Goldberg (eds.), Proceedings of the Workshop on the Relevance of Linguistic Structure in Neural Architectures for NLP, pp. 28-37, Melbourne, Australia, July 2018. Association for Computational Linguistics. doi: 10.18653/v1/W18-2905. URL https://aclanthology.org/W18-2905/. +Maximilian Köper, Christian Scheible, and Sabine Schulte im Walde. Multilingual reliability and "semantic" structure of continuous word spaces. In Matthew Purver, Mehrnoosh Sadrzadeh, and Matthew Stone (eds.), Proceedings of the 11th International Conference on Computational Semantics, pp. 40-45, London, UK, April 2015. Association for Computational Linguistics. URL https://aclanthology.org/W15-0105/. +Shayne Longpre, Gregory Yauney, Emily Reif, Katherine Lee, Adam Roberts, Barret Zoph, Denny Zhou, Jason Wei, Kevin Robinson, David Mimno, and Daphne Ippolito. A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 3245-3276, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.179. URL https://aclanthology.org/2024.naacl-long.179/. +Yingwei Ma, Yue Liu, Yue Yu, Yuanliang Zhang, Yu Jiang, Changjian Wang, and Shanshan Li. At which training stage does code data help LLMs reasoning? In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=KIPJKST4gw. 
+Alex Mallen, Akari Asai, Victor Zhong, Rajarshi Das, Daniel Khashabi, and Hannaneh Hajishirzi. When not to trust language models: Investigating effectiveness of parametric and non-parametric memories. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 9802–9822, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.546. URL https://aclanthology.org/2023.acl-long.546. +R Thomas McCoy, Shunyu Yao, Dan Friedman, Mathew D Hardy, and Thomas L Griffiths. Embers of autoregression show how large language models are shaped by the problem they are trained to solve. Proceedings of the National Academy of Sciences, 121(41):e2322420121, 2024. URL https://www.pnas.org/doi/abs/10.1073/pnas.2322420121. +Jack Merullo, Carsten Eickhoff, and Ellie Pavlick. Language models implement simple Word2Vec-style vector arithmetic. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 5030-5047, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.281. URL https://aclanthology.org/2024.naacl-long.281. +Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781, 2013a. URL https://arxiv.org/abs/1301.3781. +Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Corrado, and Jeff Dean. Distributed representations of words and phrases and their compositionality. In C.J. Burges, L. Bottou, M. Welling, Z. Ghahramani, and K.Q. Weinberger (eds.), Advances in Neural Information Processing Systems, volume 26. Curran Associates, Inc., 2013b. URL https://proceedings.neurips.cc/paper_files/paper/2013/file/9aa42b31882ec039965f3c4923ce901b-Paper.pdf. +Sewon Min, Xinxi Lyu, Ari Holtzman, Mikel Artetxe, Mike Lewis, Hannaneh Hajishirzi, and Luke Zettlemoyer. Rethinking the role of demonstrations: What makes in-context learning work? In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 11048-11064, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.759. URL https://aclanthology.org/2022.emnlp-main.759/. +Chris Olah, Nick Cammarata, Ludwig Schubert, Gabriel Goh, Michael Petrov, and Shan Carter. Zoom in: An introduction to circuits. Distill, 5(3):e00024-001, 2020. URL https://distill.pub/2020/circuits/zoom-in/. +Yonatan Oren, Nicole Meister, Niladri S. Chatterji, Faisal Ladhak, and Tatsunori Hashimoto. Proving test set contamination in black-box language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=KS8mIvetg2. +Alberto Paccanaro and Geoffrey E Hinton. Learning Hierarchical Structures with Linear Relational Embedding. In Advances in Neural Information Processing Systems, volume 14. MIT Press, 2001. URL https://papers.nips.cc/paper_files/paper/2001/hash/814a9c18f5abff398787c9cfcbf3d80c-Abstract.html. +Kiho Park, Yo Joong Choe, and Victor Veitch. The Linear Representation Hypothesis and the Geometry of Large Language Models. In Forty-first International Conference on Machine Learning, 2024. 
URL https://openreview.net/forum?id=UGpGkLzwpP. +Jeffrey Pennington, Richard Socher, and Christopher Manning. GloVe: Global vectors for word representation. In Alessandro Moschitti, Bo Pang, and Walter Daelemans (eds.), Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 1532-1543, Doha, Qatar, October 2014. Association for Computational Linguistics. doi: 10.3115/v1/D14-1162. URL https://aclanthology.org/D14-1162. +Shauli Ravfogel, Yanai Elazar, Hila Gonen, Michael Twiton, and Yoav Goldberg. Null it out: Guarding protected attributes by iterative nullspace projection. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 7237-7256, Online, July 2020. Association for Computational Linguistics. URL https://www.aclweb.org/anthology/2020.acl-main.647. +Yasaman Razeghi, Robert L Logan IV, Matt Gardner, and Sameer Singh. Impact of pretraining term frequencies on few-shot numerical reasoning. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Findings of the Association for Computational Linguistics: EMNLP 2022, pp. 840-854, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.findings-emnlp.59. URL https://aclanthology.org/2022.findings-emnlp.59/. +Yasaman Razeghi, Hamish Ivison, Sameer Singh, and Yanai Elazar. Backtracking mathematical reasoning of language models to the pretraining data. In NeurIPS Workshop on Attributing Model Behavior at Scale, 2023. URL https://openreview.net/forum?id=EKvqw9k3lC. + +Nina Rimsky, Nick Gabrieli, Julian Schulz, Meg Tong, Evan Hubinger, and Alexander Turner. Steering llama 2 via contrastive activation addition. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 15504-15522, Bangkok, Thailand, August 2024. doi: 10.18653/v1/2024.acl-long.828. URL https://aclanthology.org/2024.acl-long.828/. +G. Salton, A. Wong, and C. S. Yang. A vector space model for automatic indexing. Commun. ACM, 18(11):613-620, November 1975. ISSN 0001-0782. doi: 10.1145/361219.361220. URL https://doi.org/10.1145/361219.361220. +Naomi Saphra and Sarah Wiegreffe. Mechanistic? In Yonatan Belinkov, Najoung Kim, Jaap Jumelet, Hosein Mohebbi, Aaron Mueller, and Hanjie Chen (eds.), Proceedings of the 7th BlackboxNLP Workshop: Analyzing and Interpreting Neural Networks for NLP, pp. 480-498, Miami, Florida, US, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.blackboxnlp-1.30. URL https://aclanthology.org/2024.blackboxnlp-1.30/. +Preethi Seshadri, Sameer Singh, and Yanai Elazar. The bias amplification paradox in text-to-image generation. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 6367-6384, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.353. URL https://aclanthology.org/2024.naacl-long.353/. +Weijia Shi, Anirudh Ajith, Mengzhou Xia, Yangsibo Huang, Daogao Liu, Terra Blevins, Danqi Chen, and Luke Zettlemoyer. Detecting pretraining data from large language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=zWqr3MQuNs. +Reza Shokri, Marco Stronati, Congzheng Song, and Vitaly Shmatikov. Membership inference attacks against machine learning models. In 2017 IEEE Symposium on Security and Privacy (SP), pp. 3-18, 2017. doi: 10.1109/SP.2017.41. 
URL https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7958568. +Aviv Slobodkin, Omer Goldman, Avi Caciularu, Ido Dagan, and Shauli Ravfogel. The curious case of hallucinatory (un)answerability: Finding truths in the hidden states of over-confident large language models. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 3607-3625, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.220. URL https://aclanthology.org/2023.emnlp-main.220/. +Luca Soldaini, Rodney Kinney, Akshita Bhagia, Dustin Schwenk, David Atkinson, Russell Authur, Ben Bogin, Khyathi Chandu, Jennifer Dumas, Yanai Elazar, et al. Dolma: an open corpus of three trillion tokens for language model pretraining research. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 15725-15788, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.840. URL https://aclanthology.org/2024.acl-long.840/. +Nishant Subramani, Nivedita Suresh, and Matthew Peters. Extracting Latent Steering Vectors from Pretrained Language Models. In Smaranda Muresan, Preslav Nakov, and Aline Villavicencio (eds.), Findings of the Association for Computational Linguistics: ACL 2022, pp. 566-581. Association for Computational Linguistics, 2022. doi: 10.18653/v1/2022.findings-acl.48. URL https://aclanthology.org/2022.findings-acl.48. +Anshuman Suri and David Evans. Formalizing and estimating distribution inference risks. Proceedings on Privacy Enhancing Technologies, 2022. URL https://arxiv.org/abs/2109.06024. +Adly Templeton, Tom Conerly, Jonathan Marcus, Jack Lindsey, Trenton Bricken, Brian Chen, Adam Pearce, Craig Citro, Emmanuel Ameisen, Andy Jones, et al. Scaling Monosemanticity: Extracting Interpretable Features from Claude 3 Sonnet. 2024. URL https://transformer-circuits.pub/2024/scaling-monosemanticity/index.html. + +Eric Todd, Millicent Li, Arnab Sen Sharma, Aaron Mueller, Byron C Wallace, and David Bau. Function vectors in large language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=AwyxtyMwaG. +Ben Wang and Aran Komatsuzaki. GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model. https://github.com/kingoflolz/mesh-transformer-jax, May 2021. +Xinyi Wang, Alfonso Amayuelas, Kexun Zhang, Liangming Pan, Wenhu Chen, and William Yang Wang. Understanding reasoning ability of language models from the perspective of reasoning paths aggregation. In Forty-first International Conference on Machine Learning, 2024. URL https://openreview.net/forum?id=dZsEOFUDew. +Xinyi Wang, Antonis Antoniades, Yanai Elazar, Alfonso Amayuelas, Alon Albalak, Kexun Zhang, and William Yang Wang. Generalization v.s. memorization: Tracing language models' capabilities back to pretraining data. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=IQxBDLmVpT. +Sang Michael Xie, Hieu Pham, Xuanyi Dong, Nan Du, Hanxiao Liu, Yifeng Lu, Percy Liang, Quoc V Le, Tengyu Ma, and Adams Wei Yu. Doremi: Optimizing data mixtures speeds up language model pretraining. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=1XuByUeHhd. 
+ +# A LIMITATIONS + +While our approach thoroughly tracks exposure to individual terms and the formation of LRE features across pretraining, we cannot draw causal claims about how exposure affects individual representations, due to the cost of counterfactual pretraining. We try to address this by showing that the frequency of individual terms can be predicted with some accuracy from measurements of LRE presence. We motivate this approach as a possible way to detect the training data of closed-data LMs; however, we are not able to make any guarantees about its efficacy in settings not shown here, and we would caution against drawing strong conclusions without additional information. Furthermore, we find that our method is relatively worse at predicting subject-object co-occurrences than object occurrences, and it fails to account for the harder task. Future work could expand on this tool by incorporating it with other data inference methods for greater confidence. We also do not discuss the role of the presentation of facts in the formation of LRE features, but following Elsahar et al. (2018) and given the strength of the relationship we find, we speculate this has minimal impact. Note that the BatchSearch tool we release tracks the exact position index of the searched terms, thus facilitating future work on questions about templates and presentation of information.
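To illustrate the kind of search BatchSearch performs, below is a minimal sketch (our illustration, not the released tool; all names are ours) of counting subject-object co-occurrences, and their positions, over tokenized sequences:

```python
# Minimal sketch of co-occurrence counting over tokenized sequences.
# A subject and object 'co-occur' when both token spans appear in the
# same pretraining sequence; we also record where each match occurs,
# mirroring the position tracking mentioned above.

from typing import List, Tuple

def find_span(seq: List[int], span: List[int]) -> List[int]:
    # Start indices where `span` occurs as a contiguous subsequence of `seq`.
    n, m = len(seq), len(span)
    return [i for i in range(n - m + 1) if seq[i:i + m] == span]

def count_cooccurrences(sequences: List[List[int]],
                        subj_ids: List[int],
                        obj_ids: List[int]) -> Tuple[int, List[Tuple[int, int, int]]]:
    # Returns (number of sequences containing both spans, match positions
    # as (sequence index, subject start, object start) triples).
    total, positions = 0, []
    for idx, seq in enumerate(sequences):
        subj_hits = find_span(seq, subj_ids)
        obj_hits = find_span(seq, obj_ids)
        if subj_hits and obj_hits:
            total += 1  # counted once per sequence, like a document-level count
            positions.extend((idx, s, o) for s in subj_hits for o in obj_hits)
    return total, positions
```

A real implementation would stream pre-tokenized pretraining batches from disk and search for many terms at once rather than holding all sequences in memory.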
+ +# B EFFECT OF TRAINING ON INCORRECT EXAMPLES + +In Hernandez et al. (2024), examples are filtered to ones that the LM gets correct, under the assumption that an LRE will only exist once a model has attained the knowledge to answer the relation accurately (e.g., knowing many country capitals). We find that fitting an LRE is not entirely dependent on the model 'knowing' that relation perfectly (i.e., attaining high accuracy). This is convenient for our study, where we test early-checkpoint models that do not necessarily have all of the information they will have seen later in training. In Figure 5, we show faithfulness on relations where the LRE was fit with all, half, or zero correct examples. We omit data for which the model did not produce enough incorrect examples. Averages across relations for which we have enough data are shown in Figure 4, which shows that there is not a considerable difference in the choice of LRE samples to train with. + +![](images/01813bff727a9fe1a067c13b336446f4f03f90c681bba96141df80054d9c6e2a.jpg) +Figure 4: Average Causality and Faithfulness results across relations depending on whether the LRE was fit with correct or incorrect samples. We find no notable difference in the choice of examples. + +![](images/e1bc4c029a94a41a2487a11963db8efb8a4bc436b13831cb494622be6967af91.jpg) + +![](images/b1468d161541d0304c9b2f6e11599e08df3b2e7d0401e29a1958c7c2c8462627.jpg) +Figure 5: Causality and Faithfulness results for each relation depending on whether the LRE was fit with correct or incorrect samples. Note that relations with only one bar do not have zeros in the other categories; rather, there was not enough data that the model (OLMo-7B) got wrong to fit an LRE. + +# C LRE HYPERPARAMETER TUNING + +There are three hyperparameters for fitting LREs: the layer at which to edit the subject, the beta term used to scale the LRE weight matrix, and the rank of the pseudoinverse matrix used to make edits for measuring causality. Beta is exclusive to measuring faithfulness, and rank is exclusive to causality. We test the same ranges for each as in Hernandez et al. (2024): [0, 5] for beta and [0, full_rank] for rank, at varying intervals. Those intervals are every 2 from [0, 100], every 5 from [100, 200], every 25 from [200, 500], every 50 from [500, 1000], and every 250 from [1000, hidden_size]. We perform the hyperparameter sweeps across faithfulness and causality, but we choose the layer to edit based on the causality score. In cases where this is not the same layer that faithfulness would choose, we use the layer causality chooses, as it would not make sense to train one LRE per metric. We refer the reader to Hernandez et al. (2024) for more details on the interactions between hyperparameters and the choice of layer. The results of our sweeps on OLMo-7B are shown across layers in Figures 6 and 7, and across beta and rank choices in Figures 8 and 9. + +![](images/ccf55fa257666450f7d7be8f3fb3642fd19ac04cc3fb787bce208255d416229f.jpg) +Figure 6: OLMo 0424 7B per-layer faithfulness scores as a function of the choice of layer at which to fit the LRE. Note we do not use these results to choose the layer for the LRE, instead preferring the results from the causality sweep. + +![](images/7e45008f997fe87d85da17fac14afbe181e1bd855d46089d2000d6d279f17c0b.jpg) +Figure 7: OLMo 0424 7B per-layer causality scores as a function of the choice of layer at which to fit the LRE. + +![](images/834ebec1094b86ae7cfc91aa6d20e88a13cb123b46a91be46bb670f3f4ffa542.jpg) +Figure 8: OLMo 0424 7B LRE beta hyperparameter sweep at the highest-performing layer. + +![](images/d48a6dd92df0c9e1200f947b2cd9997a53d627e698e84082717cc4c1719b0974.jpg) +Figure 9: OLMo 0424 7B LRE rank hyperparameter sweep at the highest-performing layer. + +# D BATCH SEARCH COUNTS COMPARED TO WIMBD + +In Figure 10, we find that What's in My Big Data (Elazar et al., 2024) matches batch search co-occurrences very well; however, WIMBD tends to over-predict co-occurrences (slope less than 1), due to the sequence length being shorter than many documents, as discussed in the main paper. + +![](images/e0ce908e77a2e481e809b995710be04205bec67e9e7f5367e85b79837aa8831b.jpg) + +![](images/c57e25852404ada77ca4a04964e77b1a84e1d9076ec8387246cef26b62d8f7fd.jpg) +Figure 10: Comparison between WIMBD and Batch Search subject-object co-occurrences (slope=0.94, r=0.99). + +# E FEATURE CORRELATIONS AND IMPORTANCES + +Our feature importance test is shown in Figure 12. This permutation test was done on the heldout data to show which features contribute the most to generalization performance. We use PCA to reduce the faithfulness features to one feature for the purposes of this test; correlations between all features are shown in Figure 11. + +![](images/033cc96dfe8534d7d86dd0369b088e2fbd4344a80840dfa4285ec2df9c7c3d0f.jpg) +Figure 11: Correlations between each feature in our regression analysis. Because of the high correlation between the faithfulness metrics, we use a single-dimensional PCA to attain one feature that captures $89\%$ of the variance of both for the purposes of the feature importance tests. Note that we zero out the diagonal (which has values of 1) for readability.
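As a concrete sketch of this procedure (our illustration with synthetic data; feature names are hypothetical and scikit-learn is assumed), one can collapse the two correlated faithfulness metrics into a single PCA component and then run a permutation importance test on the held-out split:

```python
# Sketch of the feature-importance test described above: reduce the two
# highly correlated faithfulness metrics to one PCA component, fit the
# random forest, then permute each feature on held-out data to measure
# how much generalization performance depends on it.

import numpy as np
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestRegressor
from sklearn.inspection import permutation_importance

rng = np.random.default_rng(0)
# Synthetic stand-ins for per-example features:
# [faithfulness_a, faithfulness_b, hard_causality, log_prob]
X = rng.normal(size=(500, 4))
y = rng.normal(loc=10.0, size=500)  # stand-in for log term frequency

# One PCA component for the two faithfulness metrics.
faith_pca = PCA(n_components=1).fit_transform(X[:, :2])
X_reduced = np.hstack([faith_pca, X[:, 2:]])  # [faith_pca, hard_causality, log_prob]

model = RandomForestRegressor(random_state=0).fit(X_reduced[:400], y[:400])
result = permutation_importance(model, X_reduced[400:], y[400:],
                                n_repeats=10, random_state=0)
for name, imp in zip(["faith_pca", "hard_causality", "log_prob"],
                     result.importances_mean):
    print(f"{name}: {imp:.3f}")
```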
![](images/685630e8cab5d7ffaecc4c03b74e95cadb0e2941465d30626d12961382cbfa32.jpg) +Figure 12: Hard causality is by far the most important feature for generalizing to new relations when predicting object frequencies, causing a change of about $15\%$ accuracy. + +# F RELATIONSHIP BETWEEN CAUSALITY AND ACCURACY + +In this section, we provide more detail on the relationship between the formation of linear representations and accuracy on in-context learning tasks. Although the two are very highly correlated, we argue that accuracy and LRE formation are somewhat independent. We show this relationship across training for OLMo-1B in Figure 13 and for OLMo-7B in Figure 14. + +![](images/aa073b9b312d72e727c34d9effd4ff74d2bc67d5454ba1aa1fb8ca90ff01e807.jpg) +Figure 13: Zero-shot and 5-shot accuracies against causality for each relation across training time in OLMo-1B. + +![](images/ca992b76e0489ff59dc80b449abc20f4339b2b2e88a80a74be1ef2bbeaf07d28.jpg) +Figure 14: Zero-shot and 5-shot accuracies against causality for each relation across training time in OLMo-7B. + +# G EXTENDING TO COMMONSENSE RELATIONS + +Following Elsahar et al. (2018), we focus on factual relations because subject-object co-occurrences are shown to be a good proxy for mentions of the fact. For completeness, we consider 8 additional commonsense relations here. Results for OLMo-7B are shown in Figure 15. We show that frequency is correlated with causality score (.42) in these cases as well, but it is possible that subject-object frequencies do not accurately track occurrences of the relation being mentioned. For example, in the "task person type" relation, the co-occurrence count of the subject "researching history" and the object "historian" does not convincingly describe all instances where the historian concept is defined during pretraining. Co-occurrences are perhaps more convincingly related to how a model learns that the outside of a coconut is brown, however (the "fruit outside color" relation). Therefore, we caution against treating these under the same lens as the factual relations. Nevertheless, we believe these results offer an interesting perspective on how a different relation family compares to factual relations. + +![](images/dd50d1b038103e2aab255d65d92dd375cfbbf37543fcd2ba126437888775be19.jpg) +Figure 15: Commonsense relations compared to pretraining time in OLMo-7B. 
\ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12459/images/01813bff727a9fe1a067c13b336446f4f03f90c681bba96141df80054d9c6e2a.jpg b/data/2025/2504_12xxx/2504.12459/images/01813bff727a9fe1a067c13b336446f4f03f90c681bba96141df80054d9c6e2a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0e0b15a9fea9e0b2bb81325c48ea698979437981 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/images/01813bff727a9fe1a067c13b336446f4f03f90c681bba96141df80054d9c6e2a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f959f0dacf5c7f153fd35870cf1571ecb7f5de00a9430a8a9b808e2938ac27e +size 17407 diff --git a/data/2025/2504_12xxx/2504.12459/images/033cc96dfe8534d7d86dd0369b088e2fbd4344a80840dfa4285ec2df9c7c3d0f.jpg b/data/2025/2504_12xxx/2504.12459/images/033cc96dfe8534d7d86dd0369b088e2fbd4344a80840dfa4285ec2df9c7c3d0f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..54a3971c9db4078e174e227dd6d4ca167f0dad4e --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/images/033cc96dfe8534d7d86dd0369b088e2fbd4344a80840dfa4285ec2df9c7c3d0f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:851f16ea5194078a151722af35a81d4870cc86585cacdb8c9206c806044ae5e4 +size 27330 diff --git a/data/2025/2504_12xxx/2504.12459/images/1cd813e2fb83f1b4351e3155ec2db6833f96e31dd0f85d6bd6bd9a6cf6a6115e.jpg b/data/2025/2504_12xxx/2504.12459/images/1cd813e2fb83f1b4351e3155ec2db6833f96e31dd0f85d6bd6bd9a6cf6a6115e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3dbcea88146cf6216faa43cc54422b1a811f036d --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/images/1cd813e2fb83f1b4351e3155ec2db6833f96e31dd0f85d6bd6bd9a6cf6a6115e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:113c46b22b1ab38af09ec2984921eda8001c883a494c4830b2004de8e34153a0 +size 11813 diff --git a/data/2025/2504_12xxx/2504.12459/images/685630e8cab5d7ffaecc4c03b74e95cadb0e2941465d30626d12961382cbfa32.jpg b/data/2025/2504_12xxx/2504.12459/images/685630e8cab5d7ffaecc4c03b74e95cadb0e2941465d30626d12961382cbfa32.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0ad0f39b12c82cb7bbde28b4f0356a9f8e50b23b --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/images/685630e8cab5d7ffaecc4c03b74e95cadb0e2941465d30626d12961382cbfa32.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:66878bddf1628b0e86e554d2733e90fa522b79d1bb453589fef1b03e36d4c161 +size 32943 diff --git a/data/2025/2504_12xxx/2504.12459/images/7e45008f997fe87d85da17fac14afbe181e1bd855d46089d2000d6d279f17c0b.jpg b/data/2025/2504_12xxx/2504.12459/images/7e45008f997fe87d85da17fac14afbe181e1bd855d46089d2000d6d279f17c0b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f9a9113e744f789a9f5c8aa6b14a6eaf62a7fcb --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/images/7e45008f997fe87d85da17fac14afbe181e1bd855d46089d2000d6d279f17c0b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a1ba2e19db04e6aedb891bd566ed0660c19c1303cde56453f313f8745547122 +size 130350 diff --git a/data/2025/2504_12xxx/2504.12459/images/834ebec1094b86ae7cfc91aa6d20e88a13cb123b46a91be46bb670f3f4ffa542.jpg b/data/2025/2504_12xxx/2504.12459/images/834ebec1094b86ae7cfc91aa6d20e88a13cb123b46a91be46bb670f3f4ffa542.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6fbcf73021ab42e358176b77614e9b6c85f734c5 --- /dev/null +++ 
b/data/2025/2504_12xxx/2504.12459/images/834ebec1094b86ae7cfc91aa6d20e88a13cb123b46a91be46bb670f3f4ffa542.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8f500c50fa341062239ad291606141c39b050a2db9cd3856833244e2a2df1c55 +size 131144 diff --git a/data/2025/2504_12xxx/2504.12459/images/992f1844b981d1812454b47d5c56dafde0f907001c4704149adb3c14d065df3a.jpg b/data/2025/2504_12xxx/2504.12459/images/992f1844b981d1812454b47d5c56dafde0f907001c4704149adb3c14d065df3a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d84a8d0eb155bf1499cc386f342669530bc9600 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/images/992f1844b981d1812454b47d5c56dafde0f907001c4704149adb3c14d065df3a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b159a4407a66d0b48ea48d1a3077cc253a4f7c340497e272bd70144dbeac98e +size 15100 diff --git a/data/2025/2504_12xxx/2504.12459/images/aa073b9b312d72e727c34d9effd4ff74d2bc67d5454ba1aa1fb8ca90ff01e807.jpg b/data/2025/2504_12xxx/2504.12459/images/aa073b9b312d72e727c34d9effd4ff74d2bc67d5454ba1aa1fb8ca90ff01e807.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6ec70835edc3405eb91105c205a0c94b59eaafa9 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/images/aa073b9b312d72e727c34d9effd4ff74d2bc67d5454ba1aa1fb8ca90ff01e807.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c0de561db5c515f6739dfd163fa759ad10c470ad44f04a463f97d346609673b +size 161989 diff --git a/data/2025/2504_12xxx/2504.12459/images/ad620ec86229e5de638a565490e5d6940af4a72fde26a3c5f92185684ac231db.jpg b/data/2025/2504_12xxx/2504.12459/images/ad620ec86229e5de638a565490e5d6940af4a72fde26a3c5f92185684ac231db.jpg new file mode 100644 index 0000000000000000000000000000000000000000..34d5470758aaaa68f2c523252878da2c68d5435f --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/images/ad620ec86229e5de638a565490e5d6940af4a72fde26a3c5f92185684ac231db.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11de499066679c806cbe8fc59f3b2ef64682cced9756e1a928b87ea8c978219e +size 14605 diff --git a/data/2025/2504_12xxx/2504.12459/images/b1468d161541d0304c9b2f6e11599e08df3b2e7d0401e29a1958c7c2c8462627.jpg b/data/2025/2504_12xxx/2504.12459/images/b1468d161541d0304c9b2f6e11599e08df3b2e7d0401e29a1958c7c2c8462627.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eb8982708cade0c796748e449b05b1f2edbb11fe --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/images/b1468d161541d0304c9b2f6e11599e08df3b2e7d0401e29a1958c7c2c8462627.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38f9292e052fe34c46f4fa04dfbfec7c421fde2dc6b1fca328208792ce37f1c1 +size 142982 diff --git a/data/2025/2504_12xxx/2504.12459/images/bba2f19318fa711c44305749d8472200aa8f1ab50e866a7ac5021954af5081a2.jpg b/data/2025/2504_12xxx/2504.12459/images/bba2f19318fa711c44305749d8472200aa8f1ab50e866a7ac5021954af5081a2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3d7896fcfc497bcb89739ddf6c008c5c1bf83e43 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/images/bba2f19318fa711c44305749d8472200aa8f1ab50e866a7ac5021954af5081a2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d097352920559c6942e52a5673a9780a357bf29f9e68d9f41c0543d327fb3df2 +size 46570 diff --git a/data/2025/2504_12xxx/2504.12459/images/c2a7842bab1f0f19feae7d25b7657ab844bac1cd20780d934a25f89f25c90b73.jpg 
b/data/2025/2504_12xxx/2504.12459/images/c2a7842bab1f0f19feae7d25b7657ab844bac1cd20780d934a25f89f25c90b73.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a2482ce40ae2bbaf3ee237a5b9703a9c99dadde3 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/images/c2a7842bab1f0f19feae7d25b7657ab844bac1cd20780d934a25f89f25c90b73.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f16483a38c63b1964cbad16c43967898646743f93d909794e49354438d303fa0 +size 43552 diff --git a/data/2025/2504_12xxx/2504.12459/images/c57e25852404ada77ca4a04964e77b1a84e1d9076ec8387246cef26b62d8f7fd.jpg b/data/2025/2504_12xxx/2504.12459/images/c57e25852404ada77ca4a04964e77b1a84e1d9076ec8387246cef26b62d8f7fd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1ceffaf5777a0be895fe381d44597cbad17cceb1 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/images/c57e25852404ada77ca4a04964e77b1a84e1d9076ec8387246cef26b62d8f7fd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3adfd96166be7ce013479a7d62cd159c851a5b0abe6b938df8560b496fd94207 +size 29316 diff --git a/data/2025/2504_12xxx/2504.12459/images/ca992b76e0489ff59dc80b449abc20f4339b2b2e88a80a74be1ef2bbeaf07d28.jpg b/data/2025/2504_12xxx/2504.12459/images/ca992b76e0489ff59dc80b449abc20f4339b2b2e88a80a74be1ef2bbeaf07d28.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6543d40dc3b17d213c2a42006359be06a7fc923b --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/images/ca992b76e0489ff59dc80b449abc20f4339b2b2e88a80a74be1ef2bbeaf07d28.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b153380d84f80be2b453d7ef7cb5f1abb9ba1c60ab2ededf185d2402730deda0 +size 168157 diff --git a/data/2025/2504_12xxx/2504.12459/images/cb0bfcc12bbd2922c229c26e6bbe1f7109d8e66338a8087f22eaea99302131b2.jpg b/data/2025/2504_12xxx/2504.12459/images/cb0bfcc12bbd2922c229c26e6bbe1f7109d8e66338a8087f22eaea99302131b2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..075b007134966b71b7c7822097887fe6865ee1ac --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/images/cb0bfcc12bbd2922c229c26e6bbe1f7109d8e66338a8087f22eaea99302131b2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a2e4f7493df8c73013c5bdb62091aef75347ac01eff95a7e0d5e8ffd755bbd28 +size 9992 diff --git a/data/2025/2504_12xxx/2504.12459/images/ccf55fa257666450f7d7be8f3fb3642fd19ac04cc3fb787bce208255d416229f.jpg b/data/2025/2504_12xxx/2504.12459/images/ccf55fa257666450f7d7be8f3fb3642fd19ac04cc3fb787bce208255d416229f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bc470b6f3af5328cd6b5fd3e9fb04a2ea3b37166 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/images/ccf55fa257666450f7d7be8f3fb3642fd19ac04cc3fb787bce208255d416229f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ed585eada8637f19b0973053854ef39327b7b3dce8840ad6a97765d2b92ced4 +size 129061 diff --git a/data/2025/2504_12xxx/2504.12459/images/d48a6dd92df0c9e1200f947b2cd9997a53d627e698e84082717cc4c1719b0974.jpg b/data/2025/2504_12xxx/2504.12459/images/d48a6dd92df0c9e1200f947b2cd9997a53d627e698e84082717cc4c1719b0974.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c64c2684230f51e45b2cb676abb3a684cfbf9ccb --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/images/d48a6dd92df0c9e1200f947b2cd9997a53d627e698e84082717cc4c1719b0974.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:f3eb4dd76f7e71c2f68d0bd58f8197e7dd3ba55b2c2f1abeedd02391106ff9bc +size 130557 diff --git a/data/2025/2504_12xxx/2504.12459/images/dd50d1b038103e2aab255d65d92dd375cfbbf37543fcd2ba126437888775be19.jpg b/data/2025/2504_12xxx/2504.12459/images/dd50d1b038103e2aab255d65d92dd375cfbbf37543fcd2ba126437888775be19.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3c1ef7b77e8212608283b5514bd82921aeaebaea --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/images/dd50d1b038103e2aab255d65d92dd375cfbbf37543fcd2ba126437888775be19.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd6f109d7c794e6ea9125a4a08a1ace1f715e595249dfc83927d153573136bd1 +size 29555 diff --git a/data/2025/2504_12xxx/2504.12459/images/df51c4633c26fe9b44c40708c2d148d48f2d69e5e1103b5e63f1c5f548f99183.jpg b/data/2025/2504_12xxx/2504.12459/images/df51c4633c26fe9b44c40708c2d148d48f2d69e5e1103b5e63f1c5f548f99183.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e75f4feeb1758fb87782f1916dc32073cb9fcff1 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/images/df51c4633c26fe9b44c40708c2d148d48f2d69e5e1103b5e63f1c5f548f99183.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c1a4bd61cbd1c907670c584847f846d1d9f1d413539f80d1b0ab73d1b0269a5 +size 60469 diff --git a/data/2025/2504_12xxx/2504.12459/images/e0ce908e77a2e481e809b995710be04205bec67e9e7f5367e85b79837aa8831b.jpg b/data/2025/2504_12xxx/2504.12459/images/e0ce908e77a2e481e809b995710be04205bec67e9e7f5367e85b79837aa8831b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1a31cfc3699d0f5f8f4893d567d0ae57eca0d273 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/images/e0ce908e77a2e481e809b995710be04205bec67e9e7f5367e85b79837aa8831b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f43017d8ca812ac03128c0722bb25a6948b0be9e14b35ebb9e3fedeabbf7a77d +size 27466 diff --git a/data/2025/2504_12xxx/2504.12459/images/e1028e25c5571fc63b95029cead2ac2fe6d2c38efd73e2bba131960e2b0ae469.jpg b/data/2025/2504_12xxx/2504.12459/images/e1028e25c5571fc63b95029cead2ac2fe6d2c38efd73e2bba131960e2b0ae469.jpg new file mode 100644 index 0000000000000000000000000000000000000000..973eb61d64bef17d86412a28a8aa2354e341b906 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/images/e1028e25c5571fc63b95029cead2ac2fe6d2c38efd73e2bba131960e2b0ae469.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60db7e3a6ac4a66300e61c724a7e8c58ba1a6e830c59579b8e431bf739aa12cb +size 15273 diff --git a/data/2025/2504_12xxx/2504.12459/images/e1bc4c029a94a41a2487a11963db8efb8a4bc436b13831cb494622be6967af91.jpg b/data/2025/2504_12xxx/2504.12459/images/e1bc4c029a94a41a2487a11963db8efb8a4bc436b13831cb494622be6967af91.jpg new file mode 100644 index 0000000000000000000000000000000000000000..42a9f491a7c307b051ea8670146fbf7d9207e6f8 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/images/e1bc4c029a94a41a2487a11963db8efb8a4bc436b13831cb494622be6967af91.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea98ac10dff9766badaa40dc985dfc49df3dc6563d1fec355674e13624703b88 +size 15303 diff --git a/data/2025/2504_12xxx/2504.12459/images/e7c13d5cff0901794019ccc7b0ddce6b894588c779c409326c18dc7019fafaf9.jpg b/data/2025/2504_12xxx/2504.12459/images/e7c13d5cff0901794019ccc7b0ddce6b894588c779c409326c18dc7019fafaf9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc3fdcf638e609c909edad537dfe01336c16f5a3 --- /dev/null +++ 
b/data/2025/2504_12xxx/2504.12459/images/e7c13d5cff0901794019ccc7b0ddce6b894588c779c409326c18dc7019fafaf9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82a4ad7569667ab8c3bccde67094000f0e9450e10770725760fcd578b1a5d44a +size 26388 diff --git a/data/2025/2504_12xxx/2504.12459/layout.json b/data/2025/2504_12xxx/2504.12459/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..d12cdf35e569afe02f1b59449d8dde960f4e126a --- /dev/null +++ b/data/2025/2504_12xxx/2504.12459/layout.json @@ -0,0 +1,10157 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 504, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 504, + 118 + ], + "type": "text", + "content": "ON LINEAR REPRESENTATIONS AND PRETRAINING DATA FREQUENCY IN LANGUAGE MODELS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 113, + 136, + 453, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 136, + 453, + 149 + ], + "spans": [ + { + "bbox": [ + 113, + 136, + 453, + 149 + ], + "type": "text", + "content": "Jack Merullo" + }, + { + "bbox": [ + 113, + 136, + 453, + 149 + ], + "type": "inline_equation", + "content": "^{\\diamond}" + }, + { + "bbox": [ + 113, + 136, + 453, + 149 + ], + "type": "text", + "content": " Noah A. Smith" + }, + { + "bbox": [ + 113, + 136, + 453, + 149 + ], + "type": "inline_equation", + "content": "^{\\text{♣}}" + }, + { + "bbox": [ + 113, + 136, + 453, + 149 + ], + "type": "text", + "content": " Sarah Wiegrefe" + }, + { + "bbox": [ + 113, + 136, + 453, + 149 + ], + "type": "inline_equation", + "content": "^{\\text{♥♣}}" + }, + { + "bbox": [ + 113, + 136, + 453, + 149 + ], + "type": "text", + "content": " Yanai Elazar" + }, + { + "bbox": [ + 113, + 136, + 453, + 149 + ], + "type": "inline_equation", + "content": "^{\\text{♥♣}}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 159, + 426, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 159, + 426, + 171 + ], + "spans": [ + { + "bbox": [ + 111, + 159, + 426, + 171 + ], + "type": "inline_equation", + "content": "\\diamond" + }, + { + "bbox": [ + 111, + 159, + 426, + 171 + ], + "type": "text", + "content": " Brown University, " + }, + { + "bbox": [ + 111, + 159, + 426, + 171 + ], + "type": "inline_equation", + "content": "\\diamond" + }, + { + "bbox": [ + 111, + 159, + 426, + 171 + ], + "type": "text", + "content": " Allen Institute for AI (Ai2), " + }, + { + "bbox": [ + 111, + 159, + 426, + 171 + ], + "type": "inline_equation", + "content": "\\clubsuit" + }, + { + "bbox": [ + 111, + 159, + 426, + 171 + ], + "type": "text", + "content": "University of Washington" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 112, + 171, + 192, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 171, + 192, + 181 + ], + "spans": [ + { + "bbox": [ + 112, + 171, + 192, + 181 + ], + "type": "text", + "content": "*Co-senior authors." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 112, + 182, + 449, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 182, + 449, + 194 + ], + "spans": [ + { + "bbox": [ + 112, + 182, + 449, + 194 + ], + "type": "text", + "content": "jack_merullo@brown.edu, {noah, sarahw, yanaie}@allenai.org" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 276, + 222, + 334, + 235 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 276, + 222, + 334, + 235 + ], + "spans": [ + { + "bbox": [ + 276, + 222, + 334, + 235 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 140, + 247, + 470, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 247, + 470, + 534 + ], + "spans": [ + { + "bbox": [ + 140, + 247, + 470, + 534 + ], + "type": "text", + "content": "Pretraining data has a direct impact on the behaviors and quality of language models (LMs), but we only understand the most basic principles of this relationship. While most work focuses on pretraining data's effect on downstream task behavior, we investigate its relationship to LM representations. Previous work has discovered that, in language models, some concepts are encoded 'linearly' in the representations, but what factors cause these representations to form (or not)? We study the connection between pretraining data frequency and models' linear representations of factual relations (e.g., mapping France to Paris in a capital prediction task). We find evidence that the formation of linear representations is strongly connected to pretraining term frequencies; specifically for subject-relation-object fact triplets, both subject-object co-occurrence frequency and in-context learning accuracy for the relation are highly correlated with linear representations. This is the case across all phases of pretraining, i.e., it is not affected by the model's underlying capability. In OLMo-7B and GPT-J (6B), we discover that a linear representation consistently (but not exclusively) forms when the subjects and objects within a relation co-occur at least 1k and 2k times, respectively, regardless of when these occurrences happen during pretraining (and around 4k times for OLMo-1B). Finally, we train a regression model on measurements of linear representation quality in fully-trained LMs that can predict how often a term was seen in pretraining. Our model achieves low error even on inputs from a different model with a different pretraining dataset, providing a new method for estimating properties of the otherwise-unknown training data of closed-data models. We conclude that the strength of linear representations in LMs contains signal about the models' pretraining corpora that may provide new avenues for controlling and improving model behavior: particularly, manipulating the models' training data to meet specific frequency thresholds. We release our code to support future work." 
+ }, + { + "bbox": [ + 140, + 247, + 470, + 534 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 555, + 206, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 555, + 206, + 567 + ], + "spans": [ + { + "bbox": [ + 106, + 555, + 206, + 567 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 580, + 506, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 580, + 506, + 702 + ], + "spans": [ + { + "bbox": [ + 104, + 580, + 506, + 702 + ], + "type": "text", + "content": "Understanding how the content of pretraining data affects language model (LM) behaviors and performance is an active area of research (Ma et al., 2024; Xie et al., 2023; Aryabumi et al., 2025; Longpre et al., 2024; Wang et al., 2025; Seshadri et al., 2024; Razeghi et al., 2023; Wang et al., 2024). For instance, it has been shown that for specific tasks, models perform better on instances containing higher frequency terms than lower frequency ones (Razeghi et al., 2022; Mallen et al., 2023; McCoy et al., 2024). However, the ways in which frequency affects the internal representations of LMs to cause this difference in performance remain unclear. We connect dataset statistics to recent work in interpretability, which focuses on the emergence of simple linear representations of factual relations in LMs Hernandez et al. (2024); Chanin et al. (2024). Our findings demonstrate a strong correlation between these linear representations and the frequency of terms in the pretraining corpus." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 711, + 504, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 711, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 711, + 504, + 732 + ], + "type": "text", + "content": "1Code is available at https://github.com/allenai/freq, and for efficient batch search at https://github.com/allenai/batchsearch." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 14, + 210, + 37, + 561 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 210, + 37, + 561 + ], + "spans": [ + { + "bbox": [ + 14, + 210, + 37, + 561 + ], + "type": "text", + "content": "arXiv:2504.12459v1 [cs.CL] 16 Apr 2025" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 182 + ], + "type": "text", + "content": "Linear representations in LMs have become central to interpretability research in recent years (Ravfogel et al., 2020; Elazar et al., 2021; Elhage et al., 2021; Slobodkin et al., 2023; Olah et al., 2020; Park et al., 2024; Jiang et al., 2024; Black et al., 2022; Chanin et al., 2024). Linear representations are essentially linear approximations (linear transforms, directions in space) that are simple to understand, and strongly approximate the complex non-linear transformations that networks are implementing. These representations are crucial because they allow us to localize much of the behavior and capabilities of LMs to specific directions in activation space. This allows for simple interventions to control model behaviors, i.e., steering (Todd et al., 2024; Subramani et al., 2022; Hendel et al., 2023; Rimsky et al., 2024)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 186, + 506, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 186, + 506, + 288 + ], + "spans": [ + { + "bbox": [ + 104, + 186, + 506, + 288 + ], + "type": "text", + "content": "Recent work by Hernandez et al. (2024) and Chanin et al. (2024) highlight how the linearity of different types of relations varies greatly depending on the specific relationships being depicted. For example, over " + }, + { + "bbox": [ + 104, + 186, + 506, + 288 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 104, + 186, + 506, + 288 + ], + "type": "text", + "content": " of entities in the \"country-largest-city\" relation, but less than " + }, + { + "bbox": [ + 104, + 186, + 506, + 288 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 104, + 186, + 506, + 288 + ], + "type": "text", + "content": " of entities in the \"star-in-constellation\" relation can be approximated this way (Hernandez et al., 2024). Such findings complicate the understanding of the Linear Representation Hypothesis, which proposes that LMs will represent features linearly (Park et al., 2024) without providing when/why these form. While Jiang et al. (2024) provide both theoretical and empirical evidence that the training objectives of LMs implicitly encourage linear representations, it remains unclear why some features are represented this way while others are not. This open question is a central focus of our investigation." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 292, + 506, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 292, + 506, + 447 + ], + "spans": [ + { + "bbox": [ + 104, + 292, + 506, + 447 + ], + "type": "text", + "content": "Whether linear representations for \"common\" concepts are more prevalent in models or simply easier to identify (using current methods) than those for less common concepts remains unclear. We hypothesize that factual relations exhibiting linear representations are correlated with higher mention frequencies in the pretraining data (as has been shown with static embeddings, see Ethayarajh et al., 2019), which we confirm in Section 4. Our results also indicate that this can occur at any point in pretraining, as long as a certain average frequency is reached across subject-object pairs in a relation. In order to count the appearance of terms in data corpora throughout training, we develop an efficient tool for counting tokens in tokenized batches of text, which we release to support future work in this area. We also explore whether the presence of linear representations can provide insights into relation term frequency. In Section 5, we fit a regression model to predict the frequency of individual terms (such as \"The Beatles\") in the pretraining data, based on metrics measuring the presence of a linear representation for some relation. For example, how well a linear transformation approximates the internal computation of the \"lead-singer-of\" relation mapping \"John Lennon\" to \"The Beatles\" can tell us about the frequency of those terms in the pretraining corpus." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 451, + 506, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 451, + 506, + 509 + ], + "spans": [ + { + "bbox": [ + 104, + 451, + 506, + 509 + ], + "type": "text", + "content": "Our findings indicate that the predictive signal, although approximate, is much stronger than that encoded in log probabilities and task accuracies alone, allowing us to estimate the frequencies of held-out relations and terms within approximate ranges. Importantly, this regression model generalizes beyond the specific LM it was trained on without additional supervision. This provides a valuable foundation for analyzing the pretraining corpora of closed-data models with open weights." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 512, + 278, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 512, + 278, + 524 + ], + "spans": [ + { + "bbox": [ + 105, + 512, + 278, + 524 + ], + "type": "text", + "content": "To summarize, in this paper we show that:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 127, + 542, + 504, + 717 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 129, + 542, + 504, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 542, + 504, + 563 + ], + "spans": [ + { + "bbox": [ + 129, + 542, + 504, + 563 + ], + "type": "text", + "content": "1. The development of linear representations for factual recall relations in LMs is related to frequency as well as model size." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 127, + 585, + 504, + 619 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 585, + 504, + 619 + ], + "spans": [ + { + "bbox": [ + 127, + 585, + 504, + 619 + ], + "type": "text", + "content": "2. 
Linear representations form at predictable frequency thresholds during training, regardless of when this frequency threshold is met for the nouns in the relation. The formation of these representations also correlates strongly with recall accuracy." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 127, + 639, + 504, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 639, + 504, + 673 + ], + "spans": [ + { + "bbox": [ + 127, + 639, + 504, + 673 + ], + "type": "text", + "content": "3. Measuring the extent to which a relation is represented linearly in a model allows us to predict the approximate frequencies of individual terms in the pretraining corpus of that model, even when we do not have access to the model's training data." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 127, + 694, + 504, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 694, + 504, + 717 + ], + "spans": [ + { + "bbox": [ + 127, + 694, + 504, + 717 + ], + "type": "text", + "content": "4. We release a tool for accurately and efficiently searching through tokenized text to support future research on training data." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 156, + 81, + 447, + 264 + ], + "blocks": [ + { + "bbox": [ + 156, + 81, + 447, + 264 + ], + "lines": [ + { + "bbox": [ + 156, + 81, + 447, + 264 + ], + "spans": [ + { + "bbox": [ + 156, + 81, + 447, + 264 + ], + "type": "image", + "image_path": "bba2f19318fa711c44305749d8472200aa8f1ab50e866a7ac5021954af5081a2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 276, + 506, + 344 + ], + "lines": [ + { + "bbox": [ + 104, + 276, + 506, + 344 + ], + "spans": [ + { + "bbox": [ + 104, + 276, + 506, + 344 + ], + "type": "text", + "content": "Figure 1: Overview of this work. Given a dataset of subject-relation-object factual relation triplets, we count subject-object co-occurrences throughout pretraining batches. We then measure how well the corresponding relations are represented within an LM across pretraining steps, using the Linear Relational Embeddings (LRE) method from Hernandez et al. (2024). We establish a strong relationship between average co-occurrence frequency and a model's tendency to form linear representations for relations. 
From this, we show that we can predict frequencies in the pretraining corpus." + } + ] + } ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 363, + 201, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 363, + 201, + 376 + ], + "spans": [ + { + "bbox": [ + 105, + 363, + 201, + 376 + ], + "type": "text", + "content": "2 BACKGROUND" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 389, + 250, + 400 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 389, + 250, + 400 + ], + "spans": [ + { + "bbox": [ + 105, + 389, + 250, + 400 + ], + "type": "text", + "content": "2.1 LINEAR REPRESENTATIONS" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 410, + 506, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 410, + 506, + 620 + ], + "spans": [ + { + "bbox": [ + 104, + 410, + 506, + 620 + ], + "type": "text", + "content": "Vector space models have a long history in language processing, where geometric properties of these spaces were used to encode semantic information (Salton et al., 1975; Paccanaro & Hinton, 2001). When and why linear structure emerges without explicit bias has been of considerable interest since the era of static word embeddings. Work on skipgram models (Mikolov et al., 2013a) found that vector space models of language learn regularities which allow performing vector arithmetic between word embeddings to calculate semantic relationships (e.g., Paris - France + Spain = Madrid) (Mikolov et al., 2013b; Pennington et al., 2014). This property was subject to much debate, as it was not clear why word analogies would appear for some relations and not others (Köper et al., 2015; Karpinska et al., 2018; Gladkova et al., 2016). Follow-up work showed that linguistic regularities form in static embeddings for relations under specific dataset frequency constraints for relevant terms (Ethayarajh et al., 2019), but does not clearly relate to how modern LMs learn. More recently, there has been renewed interest in the presence of similar linear structure in models with contextual embeddings like transformer language models (Park et al., 2024; Jiang et al., 2024; Merullo et al., 2024). As a result, there are many ways to find and test for linear representations in modern LMs, though the relationship to pretraining data was not addressed (Huben et al., 2024; Gao et al., 2025; Templeton et al., 2024; Rimsky et al., 2024; Todd et al., 2024; Hendel et al., 2023; Hernandez et al., 2024; Chanin et al., 2024). Many of these share similarities in how they compute and test for linear representations. We focus on a particular class of linear representations called Linear Relational Embeddings (LREs) (Paccanaro & Hinton, 2001)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": "Linear Relational Embeddings (LREs) Hernandez et al. (2024) use a particular class of linear representation called a Linear Relational Embedding (Paccanaro & Hinton, 2001) to approximate the computation performed by a model to predict the objects that complete common subject-relation-object triplets as an affine transformation.
This transform is calculated from a hidden state " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "\mathbf{s}" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": ", the subject token representation at some middle layer of the model, to " + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "inline_equation", + "content": "\mathbf{o}" + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": ", the hidden state at the last token position and layer of the model (i.e., the final hidden state that decodes a token in an autoregressive transformer) within a natural language description of the relation. For example, given the input sequence \"Miles Davis (subject) plays the (relation)\", the goal is to approximate the computation of the object \"trumpet\", assuming the model predicts the object correctly." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 172 + ], + "type": "text", + "content": "It was found that this transformation holds for nearly every subject and object in the relation set (such as \"Cat Stevens plays the guitar\") for some relations. This is surprising because, despite the nonlinearities within the many layers and token positions separating s and o, a simple structure within the representation space well approximates the model's prediction process for a number of factual relations. In this work we study LREs under the same definition and experimental setup, because it allows us to predefine the concepts we want to search for (e.g., factual relations), as well as use a handful of representations to relate thousands of terms in the dataset by learning linear representations on a per-relation level." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "spans": [ + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "text", + "content": "Hernandez et al. calculate LREs to approximate an LM's computation as a first-order Taylor Series approximation.
Let " + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "inline_equation", + "content": "F(\\mathbf{s}, c) = \\mathbf{o}" + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "text", + "content": " be the forward pass through a model that produces object representation " + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "inline_equation", + "content": "\\mathbf{o}" + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "text", + "content": " given subject representation " + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "inline_equation", + "content": "\\mathbf{s}" + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "text", + "content": " and a few-shot context " + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "text", + "content": ", this computation is approximated as " + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "inline_equation", + "content": "F(\\mathbf{s}, c) \\approx W\\mathbf{s} + b = F(\\mathbf{s}_i, c) + W(\\mathbf{s} - \\mathbf{s}_i)" + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "text", + "content": " where we approximate the relation about a specific subject " + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "inline_equation", + "content": "\\mathbf{s}_i" + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "text", + "content": ". Hernandez et al. propose to compute " + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "inline_equation", + "content": "b" + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "text", + "content": " using the average of " + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "text", + "content": " examples from the relation " + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "inline_equation", + "content": "(n = 8" + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "text", + "content": " here) with " + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "inline_equation", + "content": "\\frac{\\partial F}{\\partial\\mathbf{s}}" + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "text", + "content": " representing the Jacobian Matrix of " + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "inline_equation", + "content": "F" + }, + { + "bbox": [ + 104, + 175, + 506, + 245 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 163, + 251, + 505, + 285 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 163, + 251, + 505, + 285 + ], + "spans": [ + { + "bbox": [ + 163, + 251, + 505, + 285 + ], + "type": "interline_equation", + "content": "W = \\mathbb {E} _ {\\mathbf {s} _ {i}, c _ {i}} \\left[ \\left. \\frac {\\partial F}{\\partial \\mathbf {s}} \\right| _ {(\\mathbf {s} _ {i}, c _ {i})} \\right] \\quad \\text {a n d} \\quad b = \\mathbb {E} _ {\\mathbf {s} _ {i}, c _ {i}} \\left[ \\left. 
F (\\mathbf {s}, c) - \\frac {\\partial F}{\\partial \\mathbf {s}} \\right| _ {(\\mathbf {s} _ {i}, c _ {i})} \\right] \\tag {1}", + "image_path": "cb0bfcc12bbd2922c229c26e6bbe1f7109d8e66338a8087f22eaea99302131b2.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 297, + 504, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 297, + 504, + 352 + ], + "spans": [ + { + "bbox": [ + 104, + 297, + 504, + 352 + ], + "type": "text", + "content": "In practice, LREs are estimated using hidden states from LMs during the processing of the test example in a few-shot setup. For a relation like \"instrument-played-by-musician\", the model may see four examples (in the form \"[X] plays the [Y]\") and on the fifth example, when predicting e.g., \"trumpet\" from \"Miles Davis plays the\", the subject representation s and object representation o are extracted." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 367, + 319, + 379 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 367, + 319, + 379 + ], + "spans": [ + { + "bbox": [ + 105, + 367, + 319, + 379 + ], + "type": "text", + "content": "2.2 INFERRING TRAINING DATA FROM MODELS" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 388, + 506, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 388, + 506, + 467 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 506, + 467 + ], + "type": "text", + "content": "There has been significant interest recently in understanding the extent to which it is possible to infer the training data of a fully trained neural network, including LMs, predominantly by performing membership inference attacks (Shokri et al., 2017; Carlini et al., 2022), judging memorization of text (Carlini et al., 2023; Oren et al., 2024; Shi et al., 2024), or inferring the distribution of data sources (Hayase et al., 2024; Ateniese et al., 2015; Suri & Evans, 2022). Our work is related in that we find hints of the pretraining data distribution in the model itself, but focus on how linear structure in the representations relates to training data statistics." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 484, + 179, + 496 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 484, + 179, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 484, + 179, + 496 + ], + "type": "text", + "content": "3 METHODS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 510, + 506, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 510, + 506, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 510, + 506, + 578 + ], + "type": "text", + "content": "Our analysis is twofold: counts of terms in the pretraining corpus of LMs, and measurements of how well factual relations are approximated by affine transformations. We use the OLMo model v1.7 (0424 7B and 0724 1B) (Groeneveld et al., 2024) and GPT-J (6B) (Wang & Komatsuzaki, 2021) and their corresponding datasets: Dolma (Soldaini et al., 2024) and the Pile (Gao et al., 2020), respectively. To understand how these features form over training time, we test eight model checkpoints throughout training in the OLMo family of models (Groeneveld et al., 2024)." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 592, + 356, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 592, + 356, + 604 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 356, + 604 + ], + "type": "text", + "content": "3.1 LINEAR RELATIONAL EMBEDDINGS (LRES) IN LMS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 613, + 504, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 613, + 504, + 669 + ], + "spans": [ + { + "bbox": [ + 104, + 613, + 504, + 669 + ], + "type": "text", + "content": "We use a subset of the RELATIONS dataset Hernandez et al. (2024), focusing on the 25 factual relations of the dataset, such as capital-city and person-mother (complete list in Appendix B). Across these relations, there are 10,488 unique subjects and objects. Following Hernandez et al. (2024), we fit an LRE for each relation on 8 examples from that relation, each with a 5-shot prompt. We use the approach from this work as described in Section 2.1." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 681, + 506, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 681, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 681, + 506, + 733 + ], + "type": "text", + "content": "For the analysis, we drop \"landmark-on-continent\" because " + }, + { + "bbox": [ + 104, + 681, + 506, + 733 + ], + "type": "inline_equation", + "content": "74\\%" + }, + { + "bbox": [ + 104, + 681, + 506, + 733 + ], + "type": "text", + "content": " of the answers are Antarctica, making it potentially confounding for extracting a representation for the underlying relation. Factual relations are much easier to get accurate counts for, so we leave non-factual relations for future work (e.g., although LMs associate the \"pilot\" occupation with men, this relation does not map to the word \"man\" the way \"France\" maps to \"Paris\"; see §3.2)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": "Fitting LREs Hernandez et al. (2024) find that Equation 1 underestimates the optimal slope of the linear transformation, so they scale each relation's " + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": " by a scalar hyperparameter " + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": ". 
Unlike the original work, which finds one " + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "inline_equation", + "content": "\beta" + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": " per model, we use one " + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "inline_equation", + "content": "\beta" + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": " per relation, as this avoids disadvantaging specific relations. Another difference in our calculation of LREs is that we do not impose the constraint that the model has to predict the answer correctly to be used as one of the 8 examples used to approximate the Jacobian Matrix. Interestingly, using examples that models predict incorrectly to fit Equation 1 works as well as using only correct examples. We opt to use this variant as it allows us to compare different checkpoints and models (§4) with linear transformations trained on the same 8 examples, despite the fact that the models make different predictions on these instances. We explore the effect of example choice in Appendix B and find that it does not make a significant difference. We also explore the choice of layer in Appendix C." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 227, + 506, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 227, + 506, + 339 + ], + "spans": [ + { + "bbox": [ + 104, + 227, + 506, + 339 + ], + "type": "text", + "content": "Metrics To evaluate the quality of LREs, Hernandez et al. (2024) introduce two metrics that measure the quality of the learned transformations. Faithfulness measures whether the transformation learned by the LRE produces the same object token prediction as the original LM. Causality measures the proportion of the time a prediction of an object can be changed to the output of a different example from the relation (e.g., editing the Miles Davis subject representation so that the LM predicts he plays the guitar, instead of the trumpet). For specifics on implementation, we refer the reader to Hernandez et al. (2024). We consider an LRE to be high quality when it scores highly on these metrics, as this measures when an LRE works across subject-object pairs within the relation. In general, we prefer to use causality in our analysis, as faithfulness can be high when LMs predict the same token very often (like in early checkpoints)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 361, + 353, + 374 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 361, + 353, + 374 + ], + "spans": [ + { + "bbox": [ + 105, + 361, + 353, + 374 + ], + "type": "text", + "content": "3.2 COUNTING FREQUENCIES THROUGHOUT TRAINING" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 387, + 504, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 387, + 504, + 455 + ], + "spans": [ + { + "bbox": [ + 104, + 387, + 504, + 455 + ], + "type": "text", + "content": "A key question we explore is how term frequencies affect the formation of linear representations. We hypothesize that more commonly occurring relations will lead to higher quality LREs for those relations. Following Elsahar et al. (2018) and Elazar et al. (2022), we count an occurrence of a relation when a subject and object co-occur together. While term co-occurrence is used as a proxy for the frequency of the entire triplet mentioned in text, Elsahar et al.
(2018) show that this approximation is quite accurate. We now discuss how to compute these co-occurrence counts." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 477, + 506, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 477, + 506, + 588 + ], + "spans": [ + { + "bbox": [ + 104, + 477, + 506, + 588 + ], + "type": "text", + "content": "What's in My Big Data? (WIMBD) Elazar et al. (2024) index many popular pretraining datasets, including Dolma (Soldaini et al., 2024) and the Pile (Gao et al., 2020), and provide search tools that allow for counting individual terms and co-occurrences within documents. However, this only gives us counts for the full dataset. Since we are interested in counting term frequencies throughout pretraining, we count these within training batches of OLMo instead. When per-batch counts are not available, WIMBD offers a good approximation for final checkpoints, which is what we do in the case of GPT-J. We compare WIMBD co-occurrence counts to the Batch Search method (described below) for the final checkpoint of OLMo in Appendix D, and find that the counts are extremely close: the slope of the best-fit line for BatchCount against WIMBDCount is 0.94, slightly below 1 because whole-document counts overestimate co-occurrences relative to sequence-level counts." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 611, + 506, + 733 + ], + "type": "text", + "content": "Batch Search Data counting tools cannot typically provide accurate counts for model checkpoints at arbitrary training steps. Thus, we design a tool to efficiently count exact co-occurrences within sequences of tokenized batches. This also gives us the advantage of counting in a way that is highly accurate to how LMs are trained; since LMs are trained on fixed-length batches that often split documents into multiple sequences, miscounts may occur unless one counts over the tokenized sequences themselves. Using this method, we note every time one of our 10k terms appears throughout a dataset used to pretrain an LM. We count a co-occurrence as any time two terms appear in the same sequence within a batch (a (batch-size, sequence-length) array). We search 10k terms in the approximately 2T tokens of Dolma (Soldaini et al., 2024) this way. Using our implementation, we are able to complete this on 900 CPUs in about a day. To support future work, we release our code as Cython bindings that integrate out of the box with existing libraries."
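As a rough illustration of the sequence-level counting just described, here is a pure-NumPy sketch; the released Cython tool implements the same idea far more efficiently, and the function names below are illustrative rather than its actual API.

```python
# Sketch: count sequence-level subject-object co-occurrences in tokenized batches.
import numpy as np

def contains(seq, term):
    """True if the token-id tuple `term` occurs contiguously in 1-D array `seq`."""
    m = len(term)
    if m > len(seq):
        return False
    windows = np.lib.stride_tricks.sliding_window_view(seq, m)
    return bool((windows == np.asarray(term)).all(axis=1).any())

def count_cooccurrences(batches, subj_ids, obj_ids):
    """batches yields (batch_size, sequence_length) int arrays of token ids;
    subj_ids / obj_ids are token-id tuples (multi-token terms supported).
    A co-occurrence is counted when both terms appear in the same sequence."""
    total = 0
    for batch in batches:
        for seq in batch:
            if contains(seq, subj_ids) and contains(seq, obj_ids):
                total += 1
    return total
```

Counting over tokenized sequences, rather than raw documents, is what keeps these counts faithful to what the model actually saw in each training batch.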
+ } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 100, + 296, + 178 + ], + "blocks": [ + { + "bbox": [ + 108, + 81, + 312, + 92 + ], + "lines": [ + { + "bbox": [ + 108, + 81, + 312, + 92 + ], + "spans": [ + { + "bbox": [ + 108, + 81, + 312, + 92 + ], + "type": "text", + "content": "OLMo-7B 0424 Development of LREs over Training Time" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 108, + 100, + 296, + 178 + ], + "lines": [ + { + "bbox": [ + 108, + 100, + 296, + 178 + ], + "spans": [ + { + "bbox": [ + 108, + 100, + 296, + 178 + ], + "type": "image", + "image_path": "992f1844b981d1812454b47d5c56dafde0f907001c4704149adb3c14d065df3a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 331, + 99, + 392, + 109 + ], + "lines": [ + { + "bbox": [ + 331, + 99, + 392, + 109 + ], + "spans": [ + { + "bbox": [ + 331, + 99, + 392, + 109 + ], + "type": "text", + "content": "Final Model" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 326, + 113, + 502, + 171 + ], + "blocks": [ + { + "bbox": [ + 326, + 113, + 502, + 171 + ], + "lines": [ + { + "bbox": [ + 326, + 113, + 502, + 171 + ], + "spans": [ + { + "bbox": [ + 326, + 113, + 502, + 171 + ], + "type": "table", + "html": "
ModelCo-Occurrence Threshold (Mean Causality >.9)
GPT-J (6B)1,097
OLMo-7B1,998
OLMo-1B4,447
", + "image_path": "e1028e25c5571fc63b95029cead2ac2fe6d2c38efd73e2bba131960e2b0ae469.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 108, + 201, + 296, + 281 + ], + "blocks": [ + { + "bbox": [ + 109, + 182, + 312, + 193 + ], + "lines": [ + { + "bbox": [ + 109, + 182, + 312, + 193 + ], + "spans": [ + { + "bbox": [ + 109, + 182, + 312, + 193 + ], + "type": "text", + "content": "OLMo-1B 0724 Development of LREs over Training Time" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 108, + 201, + 296, + 281 + ], + "lines": [ + { + "bbox": [ + 108, + 201, + 296, + 281 + ], + "spans": [ + { + "bbox": [ + 108, + 201, + 296, + 281 + ], + "type": "image", + "image_path": "ad620ec86229e5de638a565490e5d6940af4a72fde26a3c5f92185684ac231db.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 310, + 202, + 496, + 281 + ], + "blocks": [ + { + "bbox": [ + 331, + 87, + 432, + 99 + ], + "lines": [ + { + "bbox": [ + 331, + 87, + 432, + 99 + ], + "spans": [ + { + "bbox": [ + 331, + 87, + 432, + 99 + ], + "type": "text", + "content": "41B Tokens (10k steps)" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 328, + 182, + 496, + 193 + ], + "lines": [ + { + "bbox": [ + 328, + 182, + 496, + 193 + ], + "spans": [ + { + "bbox": [ + 328, + 182, + 496, + 193 + ], + "type": "text", + "content": "GPT-J Development of LREs over Training Time" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 310, + 202, + 496, + 281 + ], + "lines": [ + { + "bbox": [ + 310, + 202, + 496, + 281 + ], + "spans": [ + { + "bbox": [ + 310, + 202, + 496, + 281 + ], + "type": "image", + "image_path": "1cd813e2fb83f1b4351e3155ec2db6833f96e31dd0f85d6bd6bd9a6cf6a6115e.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 293, + 504, + 370 + ], + "lines": [ + { + "bbox": [ + 104, + 293, + 504, + 370 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 504, + 370 + ], + "type": "text", + "content": "Figure 2: We find that LREs have consistently high causality scores across relations after some average frequency threshold is reached (table, top right). In OLMo models, red dots show the model's LRE performance at 41B tokens, and blue dots show the final checkpoint performance (550k steps in 7B). Gray dots show intermediate checkpoints. We highlight Even at very early training steps, if the average subject-object cooc. count is high enough, the models are very likely to already have robust LREs formed in the representation space. Symbols represent different relations. Highlighted relations are shown in darker lines." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 376, + 476, + 403 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 376, + 476, + 403 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 476, + 403 + ], + "type": "text", + "content": "4 FREQUENCY OF SUBJECT-OBJECT CO-OCCURRENCES ALIGNS WITH EMERGENCE OF LINEAR REPRESENTATIONS" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 417, + 504, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 417, + 504, + 473 + ], + "spans": [ + { + "bbox": [ + 104, + 417, + 504, + 473 + ], + "type": "text", + "content": "In this section, we explore when LREs begin to appear at training time and how these are related to pretraining term frequencies. Our main findings are that (1) average co-occurrence frequency within a relation strongly correlates with whether an LRE will form; (2) the frequency effect is independent of the pretraining stage; if the average subject-object co-occurrence for a relation surpasses some threshold, it is very likely to have a high-quality LRE, even for early pretraining steps." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 487, + 161, + 498 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 487, + 161, + 498 + ], + "spans": [ + { + "bbox": [ + 105, + 487, + 161, + 498 + ], + "type": "text", + "content": "4.1 SETUP" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 509, + 506, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 509, + 506, + 631 + ], + "spans": [ + { + "bbox": [ + 104, + 509, + 506, + 631 + ], + "type": "text", + "content": "Using the factual recall relations from the Hernandez et al. (2024) dataset, we use the Batch Search method (§3.2) to count subject and object co-occurrences within sequences in Dolma (Soldaini et al., 2024) used to train the OLMo-1B (v. 0724) and 7B (v. 0424) models (Groeneveld et al., 2024). The OLMo family of models provides tools for accurately recreating the batches from Dolma, which allow us to reconstruct the data the way the model was trained. We also use GPT-J (Wang & Komatsuzaki, 2021) and the Pile (Gao et al., 2020) as its training data, but since we do not have access to accurate batches used to train it, we use WIMBD (Elazar et al., 2024) to count subject-object counts in the entire data. We fit LREs on each relation and model separately. Hyperparameter sweeps are in Appendix C. OLMo also releases intermediate checkpoints, which we use to track development over pretraining time. We use checkpoints that have seen {41B, 104B, 209B, 419B, 628B, 838B, 1T, and 2T} tokens.3 We use the Pearson coefficient for measuring correlation." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 645, + 170, + 656 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 645, + 170, + 656 + ], + "spans": [ + { + "bbox": [ + 105, + 645, + 170, + 656 + ], + "type": "text", + "content": "4.2 RESULTS" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 666, + 504, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 504, + 689 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 504, + 689 + ], + "type": "text", + "content": "Our results are summarized in Figure 2. We report training tokens because the step count differs between 7B and 1B. 
Co-occurrence frequencies highly correlate with causality " + }, + { + "bbox": [ + 104, + 666, + 504, + 689 + ], + "type": "inline_equation", + "content": "(r = 0.82)" + }, + { + "bbox": [ + 104, + 666, + 504, + 689 + ], + "type": "text", + "content": ". This" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 118, + 700, + 491, + 712 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 700, + 491, + 712 + ], + "spans": [ + { + "bbox": [ + 118, + 700, + 491, + 712 + ], + "type": "text", + "content": "3In OLMo-7B 0424, this corresponds to " + }, + { + "bbox": [ + 118, + 700, + 491, + 712 + ], + "type": "inline_equation", + "content": "10\mathrm{k}" + }, + { + "bbox": [ + 118, + 700, + 491, + 712 + ], + "type": "text", + "content": ", 25k, 50k, 100k, 150k, 200k, 250k, 409k pretraining steps" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 712, + 504, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 712, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 712, + 504, + 732 + ], + "type": "text", + "content": "These are: 'country largest city', 'country currency', 'company hq', 'company CEO', and 'star constellation name' in order from best to worst performing final checkpoints." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "is notably higher than the correlations with subject frequencies (" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "r = 0.66" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": ") and object frequencies (" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "inline_equation", + "content": "r = 0.59" + }, + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "), in both OLMo-7B and OLMo-1B." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 110, + 506, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 177 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 177 + ], + "type": "text", + "content": "We consider a causality score above 0.9 to be nearly perfectly linear. The table in Figure 2 shows, for each model, the co-occurrence count above which the average causality is above 0.9; these thresholds are shown as dashed black lines on the scatterplots. Regardless of pretraining step, relations that surpass this threshold have very high causality scores.
Although we cannot draw conclusions from only three models, it is possible that scale also affects this threshold: OLMo-7B and GPT-J (6B params) require far less exposure than OLMo-1B." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 190, + 261, + 201 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 190, + 261, + 201 + ], + "spans": [ + { + "bbox": [ + 105, + 190, + 261, + 201 + ], + "type": "text", + "content": "4.3 RELATIONSHIP TO ACCURACY" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 209, + 506, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 209, + 506, + 376 + ], + "spans": [ + { + "bbox": [ + 104, + 209, + 506, + 376 + ], + "type": "text", + "content": "Increased frequency (or a proxy for it) was shown to lead to better factual recall in LMs (Chang et al., 2024; Mallen et al., 2023). However, it remains unknown whether high accuracy entails the existence of a linear relationship. Such a finding would inform when we expect an LM to achieve high accuracy on a task. We find that the correlation between causality and subject-object frequency is higher than with 5-shot accuracy (0.82 vs. 0.74 in OLMo-7B), though both are clearly high. In addition, there are a few examples of high-accuracy relations that do not form single consistent LREs. These relations are typically low frequency, such as star constellation name, which has " + }, + { + "bbox": [ + 104, + 209, + 506, + 376 + ], + "type": "inline_equation", + "content": "84\%" + }, + { + "bbox": [ + 104, + 209, + 506, + 376 + ], + "type": "text", + "content": " 5-shot accuracy but only " + }, + { + "bbox": [ + 104, + 209, + 506, + 376 + ], + "type": "inline_equation", + "content": "44\%" + }, + { + "bbox": [ + 104, + 209, + 506, + 376 + ], + "type": "text", + "content": " causality (OLMo-7B), with subjects and objects only co-occurring about 21 times on average across the full dataset. In general, few-shot accuracy closely tracks causality, consistent with arguments that in-context learning allows models to identify linear mappings between input-output pairs (Hendel et al., 2023; Garg et al., 2022). We find that causality increases first in some cases; e.g., \"food-from-country\" has a causality of " + }, + { + "bbox": [ + 104, + 209, + 506, + 376 + ], + "type": "inline_equation", + "content": "65\%" + }, + { + "bbox": [ + 104, + 209, + 506, + 376 + ], + "type": "text", + "content": " but a 5-shot accuracy of only " + }, + { + "bbox": [ + 104, + 209, + 506, + 376 + ], + "type": "inline_equation", + "content": "42\%" + }, + { + "bbox": [ + 104, + 209, + 506, + 376 + ], + "type": "text", + "content": ". This gap is consistently closed through training. In the final model, causality and 5-shot accuracy are within " + }, + { + "bbox": [ + 104, + 209, + 506, + 376 + ], + "type": "inline_equation", + "content": "11\%" + }, + { + "bbox": [ + 104, + 209, + 506, + 376 + ], + "type": "text", + "content": " on average. We report the relationship between every relation, zero-shot, and few-shot accuracy for OLMo models across training in Appendix F." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 380, + 505, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 505, + 426 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 505, + 426 + ], + "type": "text", + "content": "A fundamental question in the interpretability community is under what circumstances linear structures form.
While previous work has shown that the training objective encourages this type of representation (Jiang et al., 2024), our results suggest that the reason why some concepts form a linear representation while others do not is strongly related to the pretraining frequency." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 440, + 468, + 467 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 440, + 468, + 467 + ], + "spans": [ + { + "bbox": [ + 105, + 440, + 468, + 467 + ], + "type": "text", + "content": "5 LINEAR REPRESENTATIONS HELP PREDICT PRETRAINING CORPUS FREQUENCIES" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 478, + 506, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 478, + 506, + 589 + ], + "spans": [ + { + "bbox": [ + 104, + 478, + 506, + 589 + ], + "type": "text", + "content": "In this section, we aim to understand this relationship further by exploring what the linearity of LM representations can tell us about pretraining term frequency. We target the challenging problem of predicting how often a term, or co-occurrence of terms, appears in an LM's training data from the representations alone. Such a prediction model, if it generalizes, can be useful when applied to other models whose weights are open but whose data is closed. For instance, such a predictive model could tell us whether a model was trained on specific domains (e.g., Java code) by measuring the presence of relevant LREs. First, we show that LRE features encode information about frequency that is not present in probabilities alone. Then, we show how a regression fit on one model generalizes to the features extracted from another without any information about the new model's counts." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 601, + 230, + 612 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 601, + 230, + 612 + ], + "spans": [ + { + "bbox": [ + 105, + 601, + 230, + 612 + ], + "type": "text", + "content": "5.1 EXPERIMENTAL SETUP" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": "We fit a regression to the Relations dataset (Hernandez et al., 2024) using OLMo-7B LRE features and log probabilities. We fit 24 models such that each relation is held out once per random seed across 4 seeds. We train a random forest regression model with 100 decision tree estimators to predict the frequency of terms (either the subject-object frequency, or the object frequency alone; e.g., predicting \"John Lennon\" and \"The Beatles\" or just \"The Beatles\") from one of two sets of features. Our baseline set of features is based on the likelihood of recalling a fact. Given some few-shot context from the relations dataset (\"John Lennon is a lead singer of\") we extract the log probability of the correct answer, as well as the average accuracy on this prompt across 5 trials. The intuition is that models will be more confident about highly frequent terms. The other set of features includes the first, as well as faithfulness and causality measurements."
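A sketch of the two baseline "LM-only" features just described; `answer_log_prob` and `greedy_answer` are hypothetical helpers, and the assumption that the 5 trials vary the few-shot examples in the prompt is ours, not stated in the text.

```python
# Sketch of the baseline LM-only features: log P(correct answer | prompt)
# and average few-shot recall accuracy. Helper names are hypothetical.
import numpy as np

def lm_features(model, prompts, answer):
    """prompts: 5 few-shot contexts for the same (subject, relation) pair."""
    logp = answer_log_prob(model, prompts[0], answer)   # log prob of the gold object
    acc = np.mean([greedy_answer(model, p) == answer for p in prompts])
    return np.array([logp, acc])
```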
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 146, + 82, + 466, + 208 + ], + "blocks": [ + { + "bbox": [ + 146, + 82, + 466, + 208 + ], + "lines": [ + { + "bbox": [ + 146, + 82, + 466, + 208 + ], + "spans": [ + { + "bbox": [ + 146, + 82, + 466, + 208 + ], + "type": "image", + "image_path": "e7c13d5cff0901794019ccc7b0ddce6b894588c779c409326c18dc7019fafaf9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 216, + 504, + 273 + ], + "lines": [ + { + "bbox": [ + 104, + 216, + 504, + 273 + ], + "spans": [ + { + "bbox": [ + 104, + 216, + 504, + 273 + ], + "type": "text", + "content": "Figure 3: Within-Magnitude accuracy (aka the proportion of predictions within one order of magnitude of ground truth) for models predicting object and subject-object co-occurrences in heldout relations. Using LRE features outperforms LM only features by about " + }, + { + "bbox": [ + 104, + 216, + 504, + 273 + ], + "type": "inline_equation", + "content": "30\\%" + }, + { + "bbox": [ + 104, + 216, + 504, + 273 + ], + "type": "text", + "content": ". We find that it is much easier to predict object frequencies; the subj-object prediction models with LRE features only marginally outperform baseline performance." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 286, + 504, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 286, + 504, + 387 + ], + "spans": [ + { + "bbox": [ + 104, + 286, + 504, + 387 + ], + "type": "text", + "content": "We use Faithfulness and Causality as defined in Hernandez et al. (2024) as well as two other metrics: Faith Prob., which is the log probability of the correct answer as produced by an LRE, and Hard Causality, which is the same as the \"soft\" variant, but only counts the proportion of times the causality edit produces the target answer as the number one prediction. We use every example from the relations for which there are more than one object occurrence or subject-object co-occurrence. We do not provide an explicit signal for which relation an example comes from, but due to the bias of subjects/objects having similar frequencies within a relation, we train multiple models and evaluate on held out relations and average performance. In all settings, the held out set objects and relations are guaranteed to not have been in the training set." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 402, + 418, + 414 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 402, + 418, + 414 + ], + "spans": [ + { + "bbox": [ + 105, + 402, + 418, + 414 + ], + "type": "text", + "content": "5.2 LRE METRICS ENCODE FINE-GRAINED FREQUENCY INFORMATION" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 424, + 504, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 424, + 504, + 545 + ], + "spans": [ + { + "bbox": [ + 104, + 424, + 504, + 545 + ], + "type": "text", + "content": "Because of the difficulty of predicting the exact number of occurrences, we report accuracy within one order of magnitude of the ground truth. This measures whether the predicted value is within a reasonable range of the actual value. Results are shown in Figure 3. We find that language modeling features do not provide any meaningful signal towards predicting object or subject-object frequencies, and are only marginally above the baseline of predicting the average or random frequencies from the training data. On object frequency predictions, we find that LRE features encode a strong signal allowing for accurate predictions about " + }, + { + "bbox": [ + 104, + 424, + 504, + 545 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 104, + 424, + 504, + 545 + ], + "type": "text", + "content": " of the time. Mean absolute error of the predictions (in natural log space) for LRE features (LM-only features) are 2.1, (4.2) and 1.9, (2.3) on object prediction and subject-object prediction tasks, respectively. We find that subject-object cooccurrence frequency is likely too difficult to predict given the signals that we have here, as our predictions are higher than, but within one standard deviation of the mean baseline." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 560, + 504, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 560, + 504, + 661 + ], + "spans": [ + { + "bbox": [ + 104, + 560, + 504, + 661 + ], + "type": "text", + "content": "Feature Importance: How important are LRE features for predicting the frequency of an item? We perform feature permutation tests to see how much each feature (LRE features and log probts) contributes to the final answer. First, we check to see which features used to fit the regression are correlated, as if they are, then perturbing one will leave the signal present in another. In Appendix E, we show that only faithfulness and faith probability are strongly correlated, so for this test only, we train models with a single PCA component representing " + }, + { + "bbox": [ + 104, + 560, + 504, + 661 + ], + "type": "inline_equation", + "content": "89\\%" + }, + { + "bbox": [ + 104, + 560, + 504, + 661 + ], + "type": "text", + "content": " of the variance of those two features. We find that hard causality is by far the most important feature for generalization performance, causing a difference of about " + }, + { + "bbox": [ + 104, + 560, + 504, + 661 + ], + "type": "inline_equation", + "content": "15\\%" + }, + { + "bbox": [ + 104, + 560, + 504, + 661 + ], + "type": "text", + "content": " accuracy, followed by faithfulness measures with " + }, + { + "bbox": [ + 104, + 560, + 504, + 661 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 104, + 560, + 504, + 661 + ], + "type": "text", + "content": " accuracy, providing evidence that the LRE features are encoding an important signal." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 677, + 272, + 688 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 677, + 272, + 688 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 272, + 688 + ], + "type": "text", + "content": "5.3 GENERALIZATION TO A NEW LM" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 698, + 504, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 734 + ], + "type": "text", + "content": "Next, we test the ability to generalize the regression fit of one LM to another, without requiring further supervision. If such a model could generalize, we can predict term counts to models for which we do not have access to their pretraining data. We keep the objective the same and apply" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 145, + 502, + 209 + ], + "blocks": [ + { + "bbox": [ + 104, + 89, + 504, + 145 + ], + "lines": [ + { + "bbox": [ + 104, + 89, + 504, + 145 + ], + "spans": [ + { + "bbox": [ + 104, + 89, + 504, + 145 + ], + "type": "text", + "content": "Table 1: Within-Magnitude accuracy for different settings of train and test models. Overall, we find that fitting a regression on one model's LREs and evaluating on the other provides a meaningful signal compared to fitting using only log probability and task performance, or predicting the average training data frequency. The metric here is proportion of predictions within one order of " + }, + { + "bbox": [ + 104, + 89, + 504, + 145 + ], + "type": "inline_equation", + "content": "10\\mathrm{x}" + }, + { + "bbox": [ + 104, + 89, + 504, + 145 + ], + "type": "text", + "content": " the ground truth. Here, Eval. on GPT-J means the regression is fit on OLMo and evaluated on GPT-J." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 145, + 502, + 209 + ], + "lines": [ + { + "bbox": [ + 108, + 145, + 502, + 209 + ], + "spans": [ + { + "bbox": [ + 108, + 145, + 502, + 209 + ], + "type": "table", + "html": "
ModelPredicting Object Occs.Predicting Subject-Object Co-Occs.
Eval. on GPT-JEval. on OLMoEval. on GPT-JEval. on OLMo
LRE Features0.65±0.120.49±0.120.76±0.120.68±0.08
LogProb Features0.42±0.100.41±0.090.66±0.090.60±0.07
Mean Freq. Baseline0.31±0.150.41±0.170.57±0.150.67±0.16
", + "image_path": "c2a7842bab1f0f19feae7d25b7657ab844bac1cd20780d934a25f89f25c90b73.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 220, + 504, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 220, + 504, + 244 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 504, + 244 + ], + "type": "text", + "content": "the regression model, fit for example on OLMo (\"Train OLMo\" setting), to features extracted from GPT-J, using ground truth counts from The Pile (and vice versa, i.e., the \"Train GPT-J\" setting)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 248, + 506, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 248, + 506, + 327 + ], + "spans": [ + { + "bbox": [ + 104, + 248, + 506, + 327 + ], + "type": "text", + "content": "We again train a random forest regression model to predict the frequency of terms (either the subject-object frequency, or the object frequency alone; e.g., predicting \"John Lennon\" and \"The Beatles\" or just \"The Beatles\") on features from one of two models: either OLMo-7B (final checkpoint) or GPT-J, treating the other as the 'closed' model. We test the hypothesis that LRE features (faithfulness, causality) are useful in predicting term frequencies across different models, with the hope that this could be applied to dataset inference methods in the future, where access to the ground truth pretraining data counts is limited or unavailable." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 343, + 506, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 343, + 506, + 477 + ], + "spans": [ + { + "bbox": [ + 104, + 343, + 506, + 477 + ], + "type": "text", + "content": "Results Our results are presented in Table 1. First, we find that there is a signal in the LRE features that does not exist in the log probability features: We are able to fit a much better generalizable model when using LRE features as opposed to the LM probabilities alone. Second, evaluating on the LRE features of a heldout model (scaled by the ratio of total tokens trained between the two models) maintains around the same accuracy when fit on exact counts from OLMo, allowing us to predict occurrences without access to the GPT-J pretraining data. We find that predicting either the subject-object co-occurrences or object frequencies using LREs alone is barely better than the baseline. This task is much more difficult than predicting the frequency of the object alone, but our model may just also be unable to account for outliers in the data, which is tightly clustered around the mean (thus giving the high mean baseline performance of between approx. " + }, + { + "bbox": [ + 104, + 343, + 506, + 477 + ], + "type": "inline_equation", + "content": "60 - 70\\%" + }, + { + "bbox": [ + 104, + 343, + 506, + 477 + ], + "type": "text", + "content": "). Nevertheless, we show that linear structure for relations within LM representations encode a rich signal representing dataset frequency." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 494, + 209, + 505 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 494, + 209, + 505 + ], + "spans": [ + { + "bbox": [ + 105, + 494, + 209, + 505 + ], + "type": "text", + "content": "5.4 ERROR ANALYSIS" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 517, + 506, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 517, + 506, + 616 + ], + "spans": [ + { + "bbox": [ + 104, + 517, + 506, + 616 + ], + "type": "text", + "content": "In Table 2 we show example predictions from our regression model that we fit on OLMo and evaluate on heldout relations with LREs measured on GPT-J. We find that some relations transfer more easily than others, with the star constellation name transferring especially poorly. In general, the regression transfers well, without performance deteriorating much (about " + }, + { + "bbox": [ + 104, + 517, + 506, + 616 + ], + "type": "inline_equation", + "content": "5\\%" + }, + { + "bbox": [ + 104, + 517, + 506, + 616 + ], + "type": "text", + "content": " accuracy: see Figure 3 compared to the evaluation of GPT-J in Table 1), suggesting LREs encode information in a consistent way across models. We also find that the regression makes use of the full prediction range, producing values in the millions (see Table 2) as well as in the tens; The same regression shown in the table also predicts 59 occurrences for \"Caroline Bright\" (Will Smith's mother) where the ground truth is 48." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 638, + 190, + 650 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 638, + 190, + 650 + ], + "spans": [ + { + "bbox": [ + 105, + 638, + 190, + 650 + ], + "type": "text", + "content": "6 DISCUSSION" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 665, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 504, + 733 + ], + "type": "text", + "content": "Connection to Factual Recall Work in interpretability has focused largely around linear representations in recent years, and our work aims to address the open question of the conditions in which they form. We find that coherent linear representations form when the relevant terms (in this case subject-object co-occurrences) appear in pretraining at a consistent enough rate. Analogously, Chang et al. (2024) show that repeated exposure encourages higher retention of facts. Future work could investigate the connection between factual recall accuracy and linear representations." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 108, + 156, + 503, + 244 + ], + "blocks": [ + { + "bbox": [ + 104, + 89, + 504, + 155 + ], + "lines": [ + { + "bbox": [ + 104, + 89, + 504, + 155 + ], + "spans": [ + { + "bbox": [ + 104, + 89, + 504, + 155 + ], + "type": "text", + "content": "Table 2: Examples of a regression fit on OLMo LRE metrics and evaluated on GPT-J on heldout relations, demonstrating common error patterns: 1. Predictions are better for relations that are closer to those found in fitting the relation (country related relations), 2. Some relations, like star-constellation perform very poorly, possibly due to low frequency, 3. The regression model can be sensitive to the choice of subject (e.g., William vs. Harry), telling us the choice of data to measure LREs for is important for predictions." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 156, + 503, + 244 + ], + "lines": [ + { + "bbox": [ + 108, + 156, + 503, + 244 + ], + "spans": [ + { + "bbox": [ + 108, + 156, + 503, + 244 + ], + "type": "table", + "html": "
Predicting Object Frequency in GPT-J, Regression fit on OLMo
RelationSubjectObjectPredictionGround TruthError
landmark-in-countryMenangle ParkAustralia2,986,9893,582,6021.2x
country-languageBrazilPortuguese845,406561,0051.5x
star-constellation nameArcturusBoötes974,5502,817346x
person-motherPrince WilliamPrincess Diana5,82627,0944.6x
person-motherPrince HarryPrincess Diana13127,094207x
", + "image_path": "df51c4633c26fe9b44c40708c2d148d48f2d69e5e1103b5e63f1c5f548f99183.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 256, + 506, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 256, + 506, + 456 + ], + "spans": [ + { + "bbox": [ + 104, + 256, + 506, + 456 + ], + "type": "text", + "content": "Linear Representations in LMs The difficulty of disentangling the formation of linear representations from increases in relation accuracy, especially in the few-shot case, is interesting. Across 24 relations, only the \"star-constellation-name\" and \"product-by-company\" relations have few-shot accuracies that far exceed their causality scores (and both are low frequency). Thus, it is still an open question how LMs are able to recall these tasks. The fact that few-shot accuracy and causality seem so closely linked is consistent with findings that ICL involves locating the right task (Min et al., 2022) and applying a 'function' to map input examples to outputs (Hendel et al., 2023; Todd et al., 2024). The finding that frequency controls this ability is perhaps unsurprising, as frequency also controls this linear structure emerging in static embeddings (Ethayarajh et al., 2019). Jiang et al. (2024) prove a strong frequency-based condition (based on matched log-odds between subjects and objects) and an implicit bias of gradient descent (when the frequency condition is not met) encourage linearity in LLMs; our work empirically shows conditions where linear representations tend to form in more realistic settings. If LMs are 'only' solving factual recall or performing ICL through linear structures, it is surprising how well this works at scale, but the simplicity also provides a promising way to understand LMs and ICL in general. An interesting avenue for future work would be to understand if and when LMs use a method that is not well approximated linearly to solve these types of tasks, as recent work has shown non-linearity can be preferred for some tasks in recurrent networks (Csordás et al., 2024)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 476, + 504, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 476, + 504, + 577 + ], + "spans": [ + { + "bbox": [ + 104, + 476, + 504, + 577 + ], + "type": "text", + "content": "Future Work in Predicting Dataset Frequency The ability to predict the contents of pretraining data is an important area for investigating memorization, contamination, and privacy of information used to train models. In our approach, we show it is possible to extract pretraining data signal without direct supervision. Without interpretability work on the nature of representations in LMs, we would not know of this implicit dataset signal, and we argue that interpretability can generate useful insights more broadly as well. Extensions on this work could include more information to tighten the prediction bounds on frequency, such as extracting additional features from the tokenizer (Hayase et al., 2024). We hope this work encourages future research in other ways properties of pretraining data affect LM representations for both improving and better understanding these models." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 602, + 196, + 615 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 602, + 196, + 615 + ], + "spans": [ + { + "bbox": [ + 105, + 602, + 196, + 615 + ], + "type": "text", + "content": "7 CONCLUSION" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 632, + 504, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 504, + 732 + ], + "type": "text", + "content": "We find a connection between linear representations of subject-relation-object factual triplets in LMs and the pretraining frequencies of the subjects and objects in those relations. This finding can guide future interpretability work in deciphering whether a linear representation for a given concept will exist in a model, since we observe that frequencies below a certain threshold for a given model will not yield LREs (a particular class of linear representation). From there we show that we can use the presence of linear representations to predict with some accuracy the frequency of terms in the pretraining corpus of an open-weights, closed-data model without supervision. Future work could aim to improve on our bounds of predicted frequencies. Overall, our work presents a meaningful step towards understanding the interactions between pretraining data and internal LM representations." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 218, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 218, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 218, + 94 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 504, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 504, + 129 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 504, + 129 + ], + "type": "text", + "content": "This work was performed while JM was an intern at Ai2. We thank the anonymous reviewers and members of the Aristo and AllenNLP teams at Ai2 for valuable feedback." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 148, + 175, + 159 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 148, + 175, + 159 + ], + "spans": [ + { + "bbox": [ + 106, + 148, + 175, + 159 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 166, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 106, + 166, + 505, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 166, + 505, + 212 + ], + "spans": [ + { + "bbox": [ + 106, + 166, + 505, + 212 + ], + "type": "text", + "content": "Viraat Aryabumi, Yixuan Su, Raymond Ma, Adrien Morisot, Ivan Zhang, Acyr Locatelli, Marzieh Fadaee, Ahmet Üstün, and Sara Hooker. To code or not to code? exploring impact of code in pretraining. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=zSfeN1uAcx." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 220, + 505, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 220, + 505, + 266 + ], + "spans": [ + { + "bbox": [ + 106, + 220, + 505, + 266 + ], + "type": "text", + "content": "Giuseppe Ateniese, Luigi V Mancini, Angelo Spognardi, Antonio Villani, Domenico Vitali, and Giovanni Felici. Hacking smart machines with smarter ones: How to extract meaningful data from machine learning classifiers. International Journal of Security and Networks, 10(3):137-150, 2015. URL https://dl.acm.org/doi/10.1504/IJSN.2015.071829." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 274, + 505, + 309 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 274, + 505, + 309 + ], + "spans": [ + { + "bbox": [ + 106, + 274, + 505, + 309 + ], + "type": "text", + "content": "Sid Black, Lee Sharkey, Leo Grinsztajn, Eric Winsor, Dan Braun, Jacob Merizian, Kip Parker, Carlos Ramón Guevara, Beren Millidge, Gabriel Alfour, and Connor Leahy. Interpreting neural networks through the polytope lens, 2022. URL https://arxiv.org/abs/2211.12312." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 318, + 505, + 362 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 318, + 505, + 362 + ], + "spans": [ + { + "bbox": [ + 106, + 318, + 505, + 362 + ], + "type": "text", + "content": "Nicholas Carlini, Steve Chien, Milad Nasr, Shuang Song, Andreas Terzis, and Florian Tramér. Membership inference attacks from first principles. In 2022 IEEE Symposium on Security and Privacy (SP), pp. 1897-1914, 2022. URL https://ieeexplore.ieee.org/document/9833649/." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 372, + 505, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 372, + 505, + 417 + ], + "spans": [ + { + "bbox": [ + 106, + 372, + 505, + 417 + ], + "type": "text", + "content": "Nicholas Carlini, Daphne Ippolito, Matthew Jagielski, Katherine Lee, Florian Tramer, and Chiyuan Zhang. Quantifying memorization across neural language models. In The Eleventh International Conference on Learning Representations, 2023. URL https://openreview.net/forum?id=TatRHT_1cK." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 426, + 505, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 426, + 505, + 472 + ], + "spans": [ + { + "bbox": [ + 106, + 426, + 505, + 472 + ], + "type": "text", + "content": "Hoyeon Chang, Jinho Park, Seonghyeon Ye, Sohee Yang, Youngkyung Seo, Du-Seong Chang, and Minjoon Seo. How do large language models acquire factual knowledge during pretraining? In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. URL https://openreview.net/forum?id=TYdzj1EvBP." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 480, + 505, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 480, + 505, + 548 + ], + "spans": [ + { + "bbox": [ + 106, + 480, + 505, + 548 + ], + "type": "text", + "content": "David Chanin, Anthony Hunter, and Oana-Maria Camburu. Identifying Linear Relational Concepts in Large Language Models. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 1524-1535. Association for Computational Linguistics, 2024. doi: 10.18653/v1/2024.naacl-long.85. URL https://aclanthology.org/2024.naacl-long.85." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 556, + 505, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 556, + 505, + 635 + ], + "spans": [ + { + "bbox": [ + 106, + 556, + 505, + 635 + ], + "type": "text", + "content": "Róbert Csordás, Christopher Potts, Christopher D Manning, and Atticus Geiger. Recurrent neural networks learn to store and generate sequences using non-linear representations. In Yonatan Belinkov, Najoung Kim, Jaap Jumelet, Hosein Mohebbi, Aaron Mueller, and Hanjie Chen (eds.), Proceedings of the 7th BlackboxNLP Workshop: Analyzing and Interpreting Neural Networks for NLP, pp. 248-262, Miami, Florida, US, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.blackboxnlp-1.17. URL https://aclanthology.org/2024.blackboxnlp-1.17/." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 644, + 505, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 644, + 505, + 678 + ], + "spans": [ + { + "bbox": [ + 106, + 644, + 505, + 678 + ], + "type": "text", + "content": "Yanai Elazar, Shauli Ravfogel, Alon Jacovi, and Yoav Goldberg. Amnesic Probing: Behavioral Explanation with Amnesic Counterfactuals. Transactions of the Association for Computational Linguistics, 9:160-175, 03 2021. URL https://doi.org/10.1162/tacl_a_00359." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 687, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 687, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 687, + 505, + 732 + ], + "type": "text", + "content": "Yanai Elazar, Nora Kassner, Shauli Ravfogel, Amir Feder, Abhilasha Ravichander, Marius Mosbach, Yonatan Belinkov, Hinrich Schütze, and Yoav Goldberg. Measuring causal effects of data statistics on language model's 'factual' predictions. arXiv preprint arXiv:2207.14251, 2022. URL https://arxiv.org/abs/2207.14251." 
+ } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 733 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 138 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 138 + ], + "type": "text", + "content": "Yanai Elazar, Akshita Bhagia, Ian Helgi Magnusson, Abhilasha Ravichander, Dustin Schwenk, Alane Suhr, Evan Pete Walsh, Dirk Groeneveld, Luca Soldaini, Sameer Singh, Hannaneh Hajishirzi, Noah A. Smith, and Jesse Dodge. What's in my big data? In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=RvfPnOkPV4." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 144, + 505, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 144, + 505, + 190 + ], + "spans": [ + { + "bbox": [ + 105, + 144, + 505, + 190 + ], + "type": "text", + "content": "Nelson Elhage, Neel Nanda, Catherine Olsson, Tom Henighan, Nicholas Joseph, Ben Mann, Amanda Askell, Yuntao Bai, Anna Chen, Tom Conerly, et al. A mathematical framework for transformer circuits. Transformer Circuits Thread, 2021. URL https://transformer-circuits.pub/2021/framework/index.html." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 196, + 506, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 196, + 506, + 285 + ], + "spans": [ + { + "bbox": [ + 105, + 196, + 506, + 285 + ], + "type": "text", + "content": "Hady Elsahar, Pavlos Vougiouklis, Arslen Remaci, Christophe Gravier, Jonathon Hare, Frederique Laforest, and Elena Simperl. T-REx: A large scale alignment of natural language with knowledge base triples. In Nicoletta Calzolari, Khalid Choukri, Christopher Cieri, Thierry Declerck, Sara Goggi, Koiti Hasida, Hitoshi Isahara, Bente Maegaard, Joseph Mariani, Hélène Mazo, Asuncion Moreno, Jan Odijk, Stelios Piperidis, and Takenobu Tokunaga (eds.), Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018), Miyazaki, Japan, May 2018. European Language Resources Association (ELRA). URL https://aclanthology.org/L18-1544." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 292, + 505, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 292, + 505, + 348 + ], + "spans": [ + { + "bbox": [ + 105, + 292, + 505, + 348 + ], + "type": "text", + "content": "Kawin Ethayarajh, David Duvenaud, and Graeme Hirst. Towards Understanding Linear Word Analogies. In Anna Korhonen, David Traum, and Lluís Márquez (eds.), Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pp. 3253-3262. 
Association for Computational Linguistics, 2019. doi: 10.18653/v1/P19-1315. URL https://aclanthology.org/P19-1315." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 354, + 505, + 400 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 354, + 505, + 400 + ], + "spans": [ + { + "bbox": [ + 105, + 354, + 505, + 400 + ], + "type": "text", + "content": "Leo Gao, Stella Biderman, Sid Black, Laurence Golding, Travis Hoppe, Charles Foster, Jason Phang, Horace He, Anish Thite, Noa Nabeshima, et al. The pile: An 800gb dataset of diverse text for language modeling. arXiv preprint arXiv:2101.00027, 2020. URL https://arxiv.org/abs/2101.00027." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 406, + 505, + 452 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 406, + 505, + 452 + ], + "spans": [ + { + "bbox": [ + 105, + 406, + 505, + 452 + ], + "type": "text", + "content": "Leo Gao, Tom Dupre la Tour, Henk Tillman, Gabriel Goh, Rajan Troll, Alec Radford, Ilya Sutskever, Jan Leike, and Jeffrey Wu. Scaling and evaluating sparse autoencoders. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=tcsZt9ZNKD." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 457, + 505, + 504 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 457, + 505, + 504 + ], + "spans": [ + { + "bbox": [ + 105, + 457, + 505, + 504 + ], + "type": "text", + "content": "Shivam Garg, Dimitris Tsipras, Percy Liang, and Gregory Valiant. What can transformers learn in-context? a case study of simple function classes. In Alice H. Oh, Alekh Agarwal, Danielle Belgrave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=f1NZJ2eOet." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 510, + 505, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 510, + 505, + 567 + ], + "spans": [ + { + "bbox": [ + 105, + 510, + 505, + 567 + ], + "type": "text", + "content": "Anna Gladkova, Aleksandr Drozd, and Satoshi Matsuoka. Analogy-based detection of morphological and semantic relations with word embeddings: what works and what doesn't. In Jacob Andreas, Eunsol Choi, and Angeliki Lazaridou (eds.), Proceedings of the NAACL Student Research Workshop, pp. 8-15, San Diego, California, June 2016. Association for Computational Linguistics. doi: 10.18653/v1/N16-2002. URL https://aclanthology.org/N16-2002/." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 572, + 506, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 572, + 506, + 651 + ], + "spans": [ + { + "bbox": [ + 105, + 572, + 506, + 651 + ], + "type": "text", + "content": "Dirk Groeneveld, Iz Beltagy, Evan Walsh, Akshita Bhagia, Rodney Kinney, Oyvind Tafjord, Ananya Jha, Hamish Ivison, Ian Magnusson, Yizhong Wang, et al. OLMo: Accelerating the science of language models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 15789-15809, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.841. URL https://aclanthology.org/2024.acl-long.841/." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 657, + 505, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 657, + 505, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 657, + 505, + 702 + ], + "type": "text", + "content": "Jonathan Hayase, Alisa Liu, Yejin Choi, Sewoong Oh, and Noah A. Smith. Data mixture inference: What do BPE tokenizers reveal about their training data? In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. URL https://openreview.net/forum?id=EHXyeImux0." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 708, + 505, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 505, + 733 + ], + "type": "text", + "content": "Roee Hendel, Mor Geva, and Amir Globerson. In-Context Learning Creates Task Vectors. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Findings of the Association for Computational" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 312, + 761 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 115, + 81, + 505, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 81, + 505, + 116 + ], + "spans": [ + { + "bbox": [ + 115, + 81, + 505, + 116 + ], + "type": "text", + "content": "Linguistics: EMNLP 2023, pp. 9318-9333. Association for Computational Linguistics, 2023. doi: 10.18653/v1/2023-findings-emnlp.624. URL https://aclanthology.org/2023-findings-emnlp.624." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 504, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 504, + 167 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 504, + 167 + ], + "type": "text", + "content": "Evan Hernandez, Arnab Sen Sharma, Tal Haklay, Kevin Meng, Martin Wattenberg, Jacob Andreas, Yonatan Belinkov, and David Bau. Linearity of relation decoding in transformer language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=w7LU2s14kE." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 175, + 505, + 218 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 175, + 505, + 218 + ], + "spans": [ + { + "bbox": [ + 106, + 175, + 505, + 218 + ], + "type": "text", + "content": "Robert Huben, Hoagy Cunningham, Logan Riggs Smith, Aidan Ewart, and Lee Sharkey. Sparse autoencoders find highly interpretable features in language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=F76bwRSLeK." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 226, + 505, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 226, + 505, + 270 + ], + "spans": [ + { + "bbox": [ + 106, + 226, + 505, + 270 + ], + "type": "text", + "content": "Yibo Jiang, Goutham Rajendran, Pradeep Kumar Ravikumar, Bryon Aragam, and Victor Veitch. On the origins of linear representations in large language models. In *Forty-first International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=otuTw4Mghk." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 278, + 505, + 345 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 278, + 505, + 345 + ], + "spans": [ + { + "bbox": [ + 106, + 278, + 505, + 345 + ], + "type": "text", + "content": "Marzena Karpinska, Bofang Li, Anna Rogers, and Aleksandr Drozd. Subcharacter information in Japanese embeddings: When is it worth it? In Georgiana Dinu, Miguel Ballesteros, Avirup Sil, Sam Bowman, Wael Hamza, Anders Sogaard, Tahira Naseem, and Yoav Goldberg (eds.), Proceedings of the Workshop on the Relevance of Linguistic Structure in Neural Architectures for NLP, pp. 28-37, Melbourne, Australia, July 2018. Association for Computational Linguistics. doi: 10.18653/v1/W18-2905. URL https://aclanthology.org/W18-2905/." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 351, + 505, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 351, + 505, + 407 + ], + "spans": [ + { + "bbox": [ + 106, + 351, + 505, + 407 + ], + "type": "text", + "content": "Maximilian Köper, Christian Scheible, and Sabine Schulte im Walde. Multilingual reliability and \"semantic\" structure of continuous word spaces. In Matthew Purver, Mehrnoosh Sadrzadeh, and Matthew Stone (eds.), Proceedings of the 11th International Conference on Computational Semantics, pp. 40-45, London, UK, April 2015. Association for Computational Linguistics. URL https://aclanthology.org/W15-0105/." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 415, + 505, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 415, + 505, + 503 + ], + "spans": [ + { + "bbox": [ + 106, + 415, + 505, + 503 + ], + "type": "text", + "content": "Shayne Longpre, Gregory Yauney, Emily Reif, Katherine Lee, Adam Roberts, Barret Zoph, Denny Zhou, Jason Wei, Kevin Robinson, David Mimno, and Daphne Ippolito. A pretrainer's guide to training data: Measuring the effects of data age, domain coverage, quality, & toxicity. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 3245-3276, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.179. URL https://aclanthology.org/2024.naacl-long.179/." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 510, + 505, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 510, + 505, + 555 + ], + "spans": [ + { + "bbox": [ + 106, + 510, + 505, + 555 + ], + "type": "text", + "content": "Yingwei Ma, Yue Liu, Yue Yu, Yuanliang Zhang, Yu Jiang, Changjian Wang, and Shanshan Li. At which training stage does code data help LLMs reasoning? In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=KIPJKST4gw." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 562, + 505, + 639 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 562, + 505, + 639 + ], + "spans": [ + { + "bbox": [ + 106, + 562, + 505, + 639 + ], + "type": "text", + "content": "Alex Mallen, Akari Asai, Victor Zhong, Rajarshi Das, Daniel Khashabi, and Hannaneh Hajishirzi. When not to trust language models: Investigating effectiveness of parametric and non-parametric memories. In Anna Rogers, Jordan Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 9802–9822, Toronto, Canada, July 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.acl-long.546. URL https://aclanthology.org/2023.acl-long.546." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 647, + 505, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 647, + 505, + 692 + ], + "spans": [ + { + "bbox": [ + 106, + 647, + 505, + 692 + ], + "type": "text", + "content": "R Thomas McCoy, Shunyu Yao, Dan Friedman, Mathew D Hardy, and Thomas L Griffiths. Embers of autoregression show how large language models are shaped by the problem they are trained to solve. Proceedings of the National Academy of Sciences, 121(41):e2322420121, 2024. URL https://www.pnas.org/doi/abs/10.1073/pnas.2322420121." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 698, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 698, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 698, + 505, + 732 + ], + "type": "text", + "content": "Jack Merullo, Carsten Eickhoff, and Ellie Pavlick. Language models implement simple Word2Vec-style vector arithmetic. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 82, + 505, + 732 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 115, + 82, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 82, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 115, + 82, + 505, + 117 + ], + "type": "text", + "content": "Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 5030-5047, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.nacl-long.281. URL https://aclanthology.org/2024.nacl-long.281." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 122, + 505, + 156 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 122, + 505, + 156 + ], + "spans": [ + { + "bbox": [ + 107, + 122, + 505, + 156 + ], + "type": "text", + "content": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781, 2013a. URL https://arxiv.org/abs/1301.3781." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 163, + 505, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 163, + 505, + 232 + ], + "spans": [ + { + "bbox": [ + 106, + 163, + 505, + 232 + ], + "type": "text", + "content": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Corrado, and Jeff Dean. Distributed representations of words and phrases and their compositionality. In C.J. Burges, L. Bottou, M. Welling, Z. Ghahramani, and K.Q. Weinberger (eds.), Advances in Neural Information Processing Systems, volume 26. Curran Associates, Inc., 2013b. URL https://proceedings.neurips.cc/paper_files/paper/2013/file/9aa42b31882ec039965f3c4923ce901b-Paper.pdf." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 238, + 505, + 306 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 238, + 505, + 306 + ], + "spans": [ + { + "bbox": [ + 106, + 238, + 505, + 306 + ], + "type": "text", + "content": "Sewon Min, Xinxi Lyu, Ari Holtzman, Mikel Artetxe, Mike Lewis, Hannaneh Hajishirzi, and Luke Zettlemoyer. Rethinking the role of demonstrations: What makes in-context learning work? In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing, pp. 11048-11064, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.emnlp-main.759. URL https://aclanthology.org/2022.emnlp-main.759/." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 312, + 505, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 312, + 505, + 346 + ], + "spans": [ + { + "bbox": [ + 106, + 312, + 505, + 346 + ], + "type": "text", + "content": "Chris Olah, Nick Cammarata, Ludwig Schubert, Gabriel Goh, Michael Petrov, and Shan Carter. Zoom in: An introduction to circuits. Distill, 5(3):e00024-001, 2020. URL https://distill.pub/2020/circuits/zoom-in/." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 353, + 505, + 398 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 353, + 505, + 398 + ], + "spans": [ + { + "bbox": [ + 106, + 353, + 505, + 398 + ], + "type": "text", + "content": "Yonatan Oren, Nicole Meister, Niladri S. Chatterji, Faisal Ladhak, and Tatsunori Hashimoto. Proving test set contamination in black-box language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=KS8mIvetg2." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 405, + 505, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 405, + 505, + 450 + ], + "spans": [ + { + "bbox": [ + 106, + 405, + 505, + 450 + ], + "type": "text", + "content": "Alberto Paccanaro and Geoffrey E Hinton. Learning Hierarchical Structures with Linear Relational Embedding. In Advances in Neural Information Processing Systems, volume 14. MIT Press, 2001. 
URL https://papers.nips.cc/paper_files/paper/2001/hash/814a9c18f5abff398787c9cfcbf3d80c-Abstract.html." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 456, + 505, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 456, + 505, + 492 + ], + "spans": [ + { + "bbox": [ + 106, + 456, + 505, + 492 + ], + "type": "text", + "content": "Kiho Park, Yo Joong Choe, and Victor Veitch. The Linear Representation Hypothesis and the Geometry of Large Language Models. In Forty-First International Conference on Machine Learning, 2024. URL https://openreview.net/forum?id=UGpGkLzwpP." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 498, + 505, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 498, + 505, + 555 + ], + "spans": [ + { + "bbox": [ + 106, + 498, + 505, + 555 + ], + "type": "text", + "content": "Jeffrey Pennington, Richard Socher, and Christopher Manning. GloVe: Global vectors for word representation. In Alessandro Moschitti, Bo Pang, and Walter Daelemans (eds.), Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pp. 1532-1543, Doha, Qatar, October 2014. Association for Computational Linguistics. doi: 10.3115/v1/D14-1162. URL https://aclanthology.org/D14-1162." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 561, + 505, + 617 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 561, + 505, + 617 + ], + "spans": [ + { + "bbox": [ + 106, + 561, + 505, + 617 + ], + "type": "text", + "content": "Shauli Ravfogel, Yanai Elazar, Hila Gonen, Michael Twiton, and Yoav Goldberg. Null it out: Guarding protected attributes by iterative nullspace projection. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pp. 7237-7256, Online, July 2020. Association for Computational Linguistics. URL https://www.aclweb.org/anthology/2020.acl-main.647." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 624, + 505, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 624, + 505, + 692 + ], + "spans": [ + { + "bbox": [ + 106, + 624, + 505, + 692 + ], + "type": "text", + "content": "Yasaman Razeghi, Robert L Logan IV, Matt Gardner, and Sameer Singh. Impact of pretraining term frequencies on few-shot numerical reasoning. In Yoav Goldberg, Zornitsa Kozareva, and Yue Zhang (eds.), Findings of the Association for Computational Linguistics: EMNLP 2022, pp. 840-854, Abu Dhabi, United Arab Emirates, December 2022. Association for Computational Linguistics. doi: 10.18653/v1/2022.findings-emnlp.59. URL https://aclanthology.org/2022.findings-emnlp.59/." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 698, + 505, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 698, + 505, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 698, + 505, + 732 + ], + "type": "text", + "content": "Yasaman Razeghi, Hamish Ivison, Sameer Singh, and Yanai Elazar. Backtracking mathematical reasoning of language models to the pretraining data. In NeurIPS Workshop on Attributing Model Behavior at Scale, 2023. URL https://openreview.net/forum?id=EKvqw9k3lC."
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 731 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 117 + ], + "type": "text", + "content": "Nina Rimsky, Nick Gabrieli, Julian Schulz, Meg Tong, Evan Hubinger, and Alexander Turner. Steering llama 2 via contrastive activation addition. pp. 15504-15522, August 2024. doi: 10.18653/v1/2024.acl-long.828. URL https://aclanthology.org/2024.acl-long.828/." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 124, + 504, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 124, + 504, + 158 + ], + "spans": [ + { + "bbox": [ + 105, + 124, + 504, + 158 + ], + "type": "text", + "content": "G. Salton, A. Wong, and C. S. Yang. A vector space model for automatic indexing. Commun. ACM, 18(11):613-620, November 1975. ISSN 0001-0782. doi: 10.1145/361219.361220. URL https://doi.org/10.1145/361219.361220." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 166, + 504, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 166, + 504, + 222 + ], + "spans": [ + { + "bbox": [ + 105, + 166, + 504, + 222 + ], + "type": "text", + "content": "Naomi Saphra and Sarah Wiegrefe. Mechanistic? In Yonatan Belinkov, Najoung Kim, Jaap Jumelet, Hosein Mohebbi, Aaron Mueller, and Hanjie Chen (eds.), Proceedings of the 7th BlackboxNLP Workshop: Analyzing and Interpreting Neural Networks for NLP, pp. 480-498, Miami, Florida, US, November 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.blackboxnlp-1.30. URL https://aclanthology.org/2024.blackboxnlp-1.30/." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 229, + 504, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 229, + 504, + 297 + ], + "spans": [ + { + "bbox": [ + 105, + 229, + 504, + 297 + ], + "type": "text", + "content": "Preethi Seshadri, Sameer Singh, and Yanai Elazar. The bias amplification paradox in text-to-image generation. In Kevin Duh, Helena Gomez, and Steven Bethard (eds.), Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pp. 6367-6384, Mexico City, Mexico, June 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.naacl-long.353. URL https://aclanthology.org/2024.naacl-long.353/." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 304, + 504, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 304, + 504, + 350 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 504, + 350 + ], + "type": "text", + "content": "Weijia Shi, Anirudh Ajith, Mengzhou Xia, Yangsibo Huang, Daogao Liu, Terra Blevins, Danqi Chen, and Luke Zettlemoyer. Detecting pretraining data from large language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=zWqr3MQuNs." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 357, + 504, + 402 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 357, + 504, + 402 + ], + "spans": [ + { + "bbox": [ + 105, + 357, + 504, + 402 + ], + "type": "text", + "content": "Reza Shokri, Marco Stronati, Congzheng Song, and Vitaly Shmatikov. Membership inference attacks against machine learning models. In 2017 IEEE Symposium on Security and Privacy (SP), pp. 3-18, 2017. doi: 10.1109/SP.2017.41. URL https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7958568." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 410, + 504, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 410, + 504, + 477 + ], + "spans": [ + { + "bbox": [ + 105, + 410, + 504, + 477 + ], + "type": "text", + "content": "Aviv Slobodkin, Omer Goldman, Avi Caciularu, Ido Dagan, and Shauli Ravfogel. The curious case of hallucinatory (un)answerability: Finding truths in the hidden states of over-confident large language models. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pp. 3607-3625, Singapore, December 2023. Association for Computational Linguistics. doi: 10.18653/v1/2023.emnlp-main.220. URL https://aclanthology.org/2023.emnlp-main.220/." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 485, + 504, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 485, + 504, + 563 + ], + "spans": [ + { + "bbox": [ + 105, + 485, + 504, + 563 + ], + "type": "text", + "content": "Luca Soldaini, Rodney Kinney, Akshita Bhagia, Dustin Schwenk, David Atkinson, Russell Authur, Ben Bogin, Khyathi Chandu, Jennifer Dumas, Yanai Elazar, et al. Dolma: an open corpus of three trillion tokens for language model pretraining research. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 15725-15788, Bangkok, Thailand, August 2024. Association for Computational Linguistics. doi: 10.18653/v1/2024.acl-long.840. URL https://aclanthology.org/2024.acl-long.840/." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 571, + 504, + 626 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 571, + 504, + 626 + ], + "spans": [ + { + "bbox": [ + 105, + 571, + 504, + 626 + ], + "type": "text", + "content": "Nishant Subramani, Nivedita Suresh, and Matthew Peters. Extracting Latent Steering Vectors from Pretrained Language Models. In Smaranda Muresan, Preslav Nakov, and Aline Villavicencio (eds.), Findings of the Association for Computational Linguistics: ACL 2022, pp. 566-581. Association for Computational Linguistics, 2022. doi: 10.18653/v1/2022-findings-acl.48. URL https://aclanthology.org/2022-findings-acl.48." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 634, + 504, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 634, + 504, + 667 + ], + "spans": [ + { + "bbox": [ + 105, + 634, + 504, + 667 + ], + "type": "text", + "content": "Anshuman Suri and David Evans. Formalizing and estimating distribution inference risks. Proceedings on Privacy Enhancing Technologies, 2022. URL https://arxiv.org/abs/2109.06024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 677, + 504, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 677, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 504, + 731 + ], + "type": "text", + "content": "Adly Templeton, Tom Conerly, Jonathan Marcus, Jack Lindsey, Trenton Bricken, Brian Chen, Adam Pearce, Craig Citro, Emmanuel Ameisen, Andy Jones, et al. Scaling Monoseismicity: Extracting Interpretable Features from Claude 3 Sonnet. 2024. URL https://transformer-circuits.pub/2024/scaling-monoseismicity/index.html." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 504, + 300 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 105, + 82, + 504, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 504, + 117 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 504, + 117 + ], + "type": "text", + "content": "Eric Todd, Millicent Li, Arnab Sen Sharma, Aaron Mueller, Byron C Wallace, and David Bau. Function vectors in large language models. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=AwyxtyMwaG." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 504, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 504, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 504, + 146 + ], + "type": "text", + "content": "Ben Wang and Aran Komatsuzaki. GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model. https://github.com/kingoflolz/mesh-transformer-jax, May 2021." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 152, + 504, + 196 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 152, + 504, + 196 + ], + "spans": [ + { + "bbox": [ + 105, + 152, + 504, + 196 + ], + "type": "text", + "content": "Xinyi Wang, Alfonso Amayuelas, Kexun Zhang, Liangming Pan, Wenhu Chen, and William Yang Wang. Understanding reasoning ability of language models from the perspective of reasoning paths aggregation. In *Forty-first International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=dZsEOFUDew." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 203, + 504, + 248 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 203, + 504, + 248 + ], + "spans": [ + { + "bbox": [ + 105, + 203, + 504, + 248 + ], + "type": "text", + "content": "Xinyi Wang, Antonis Antoniades, Yanai Elazar, Alfonso Amayuelas, Alon Albalak, Kexun Zhang, and William Yang Wang. Generalization v.s. memorization: Tracing language models' capabilities back to pretraining data. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=IQxBDLmVpT." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 255, + 504, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 255, + 504, + 300 + ], + "spans": [ + { + "bbox": [ + 105, + 255, + 504, + 300 + ], + "type": "text", + "content": "Sang Michael Xie, Hieu Pham, Xuanyi Dong, Nan Du, Hanxiao Liu, Yifeng Lu, Percy Liang, Quoc V Le, Tengyu Ma, and Adams Wei Yu. Doremi: Optimizing data mixtures speeds up language model pretraining. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=1XuByUeHhd." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 105, + 320, + 197, + 333 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 320, + 197, + 333 + ], + "spans": [ + { + "bbox": [ + 105, + 320, + 197, + 333 + ], + "type": "text", + "content": "A LIMITATIONS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 345, + 506, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 345, + 506, + 499 + ], + "spans": [ + { + "bbox": [ + 104, + 345, + 506, + 499 + ], + "type": "text", + "content": "While our approach thoroughly tracks exposure to individual terms and formation of LRE features across pretraining, we can not draw causal6 claims about how exposure affects individual representations, due to the cost of counterfactual pretraining. We try to address this by showing the frequency of individual terms can be predicted with some accuracy from measurements of LRE presence. We motivate this approach as a possible way to detect the training data of closed-data LMs; however, we are not able to make any guarantees on its efficacy in settings not shown here, and would caution drawing strong conclusions without additional information. Furthermore, we find that our method is relatively worse at predicting subject-object co-occurrences than object occurrences, and our method fails to account for the harder task. Future work could expand on this tool by incorporating it with other data inference methods for greater confidence. We also do not discuss the role of the presentation of facts on the formation of LRE features, but following Elsahar et al. (2018) and the strength of the relationship we find, we speculate this has minimal impact. Note that the BatchSearch tool we release tracks the exact position index of the searched terms, thus facilitating future work on questions about templates and presentation of information." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 515, + 381, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 515, + 381, + 528 + ], + "spans": [ + { + "bbox": [ + 105, + 515, + 381, + 528 + ], + "type": "text", + "content": "B EFFECT OF TRAINING ON INCORRECT EXAMPLES" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 540, + 506, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 540, + 506, + 651 + ], + "spans": [ + { + "bbox": [ + 104, + 540, + 506, + 651 + ], + "type": "text", + "content": "In Hernandez et al. (2024), examples are filtered to ones in which the LM gets correct, assuming that an LRE will only exist once a model has attained the knowledge to answer the relation accuracy (e.g., knowing many country capitals). We find that the choice of examples for fitting LREs is not entirely dependent on the model 'knowing' that relation perfectly (i.e., attains high accuracy). This is convenient for our study, where we test early checkpoint models, that do not necessarily have all of the information that they will have seen later in training. In Figure 5, we show faithfulness on relations where the LRE was fit with all, half, or zero correct examples. We omit data for which the model did not get enough incorrect examples. Averages across relations for which we have enough data are shown in Figure 4, which shows that there is not a considerable difference in the choice of LRE samples to train with." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 666, + 299, + 679 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 666, + 299, + 679 + ], + "spans": [ + { + "bbox": [ + 105, + 666, + 299, + 679 + ], + "type": "text", + "content": "C LRE HYPERPARAMETER TUNING" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 690, + 504, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 690, + 504, + 714 + ], + "spans": [ + { + "bbox": [ + 104, + 690, + 504, + 714 + ], + "type": "text", + "content": "There are three hyperparameters for fitting LREs: layer at which to edit the subject, the beta term used to scale the LRE weight matrix, and the rank of the pseudoinverse matrix used to make edits for" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 457, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 457, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 457, + 732 + ], + "type": "text", + "content": "6 And thus mechanistic, in the narrow technical sense of the term (Saphra & Wegreffe, 2024)." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 300, + 751, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 312, + 761 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 106, + 304, + 219 + ], + "blocks": [ + { + "bbox": [ + 106, + 106, + 304, + 219 + ], + "lines": [ + { + "bbox": [ + 106, + 106, + 304, + 219 + ], + "spans": [ + { + "bbox": [ + 106, + 106, + 304, + 219 + ], + "type": "image", + "image_path": "01813bff727a9fe1a067c13b336446f4f03f90c681bba96141df80054d9c6e2a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 230, + 504, + 254 + ], + "lines": [ + { + "bbox": [ + 104, + 230, + 504, + 254 + ], + "spans": [ + { + "bbox": [ + 104, + 230, + 504, + 254 + ], + "type": "text", + "content": "Figure 4: Average Causality and Faithfulness results across relations depending on if the LRE was fit with correct or incorrect samples. We find no notable difference in the choice of examples." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 304, + 106, + 503, + 218 + ], + "blocks": [ + { + "bbox": [ + 304, + 106, + 503, + 218 + ], + "lines": [ + { + "bbox": [ + 304, + 106, + 503, + 218 + ], + "spans": [ + { + "bbox": [ + 304, + 106, + 503, + 218 + ], + "type": "image", + "image_path": "e1bc4c029a94a41a2487a11963db8efb8a4bc436b13831cb494622be6967af91.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 106, + 312, + 504, + 647 + ], + "blocks": [ + { + "bbox": [ + 106, + 312, + 504, + 647 + ], + "lines": [ + { + "bbox": [ + 106, + 312, + 504, + 647 + ], + "spans": [ + { + "bbox": [ + 106, + 312, + 504, + 647 + ], + "type": "image", + "image_path": "b1468d161541d0304c9b2f6e11599e08df3b2e7d0401e29a1958c7c2c8462627.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 658, + 504, + 703 + ], + "lines": [ + { + "bbox": [ + 104, + 658, + 504, + 703 + ], + "spans": [ + { + "bbox": [ + 104, + 658, + 504, + 703 + ], + "type": "text", + "content": "Figure 5: Causality and Faithfulness results for each relation depending on if the LRE was fit with correct or incorrect samples. Note that relations with only one bar do not have zeros in the other categories. It means that there was not enough data that the model (OLMo-7B) got wrong to have enough examples to fit." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 124, + 106, + 500, + 396 + ], + "blocks": [ + { + "bbox": [ + 259, + 91, + 352, + 100 + ], + "lines": [ + { + "bbox": [ + 259, + 91, + 352, + 100 + ], + "spans": [ + { + "bbox": [ + 259, + 91, + 352, + 100 + ], + "type": "text", + "content": "Best Layer Beta vs. Faithfulness" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 124, + 106, + 500, + 396 + ], + "lines": [ + { + "bbox": [ + 124, + 106, + 500, + 396 + ], + "spans": [ + { + "bbox": [ + 124, + 106, + 500, + 396 + ], + "type": "image", + "image_path": "ccf55fa257666450f7d7be8f3fb3642fd19ac04cc3fb787bce208255d416229f.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 423, + 504, + 458 + ], + "lines": [ + { + "bbox": [ + 104, + 423, + 504, + 458 + ], + "spans": [ + { + "bbox": [ + 104, + 423, + 504, + 458 + ], + "type": "text", + "content": "Figure 6: OLMo 0424 7B per layer faithfulness scores as a function of the choice of layer at which to fit the LRE. Note we do not use these results to choose the layer for the LRE, instead preferring the results from the causality sweep." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 479, + 504, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 479, + 504, + 589 + ], + "spans": [ + { + "bbox": [ + 104, + 479, + 504, + 589 + ], + "type": "text", + "content": "measuring causality. Beta is exclusive to measuring faithfulness and rank is exclusive to causality. We test the same ranges for each as in Hernandez et al. (2024): [0, 5] beta and [0, full_rank] for causality at varying intervals. Those intervals are every 2 from [0,100], every 5 from [100,200], every 25 from [200, 500], every 50 from [500, 1000], every 250 from [1000, hidden_size]. We perform the hyperparameter sweeps across faithfulness and causality, but we choose the layer to edit based on the causality score. In cases where this is not the same layer as what faithfulness would decide, we use the layer causality chooses, as it would not make sense to train one LRE for each metric. We refer the reader to Hernandez et al. (2024) for more details on the interactions between hyperparameters and the choice of layer. The results of our sweeps on OLMo-7B across layers in Figures 6 and 7 and across beta and rank choices in Figures 8 and 9." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 606, + 384, + 618 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 606, + 384, + 618 + ], + "spans": [ + { + "bbox": [ + 105, + 606, + 384, + 618 + ], + "type": "text", + "content": "D BATCH SEARCH COUNTS COMPARED TO WIMBD" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 632, + 504, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 504, + 667 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 504, + 667 + ], + "type": "text", + "content": "In Figure 10, we find that What's in My Big Data (Elazar et al., 2024) matches very well to batch search co-occurrences; however, WIMBD tends to over-predict co-occurrences (slope less than 1), due to the sequence length being shorter than many documents, as discussed in the main paper." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 684, + 360, + 696 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 684, + 360, + 696 + ], + "spans": [ + { + "bbox": [ + 105, + 684, + 360, + 696 + ], + "type": "text", + "content": "E FEATURE CORRELATIONS AND IMPORTANCES" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 709, + 504, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 732 + ], + "type": "text", + "content": "Our feature importance test is shown in Figure 12. This permutation test was done on the heldout data to show which features contribute the most to generalization performance. We use PCA to" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 125, + 249, + 501, + 536 + ], + "blocks": [ + { + "bbox": [ + 277, + 234, + 334, + 243 + ], + "lines": [ + { + "bbox": [ + 277, + 234, + 334, + 243 + ], + "spans": [ + { + "bbox": [ + 277, + 234, + 334, + 243 + ], + "type": "text", + "content": "Layer vs. Causality" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 125, + 249, + 501, + 536 + ], + "lines": [ + { + "bbox": [ + 125, + 249, + 501, + 536 + ], + "spans": [ + { + "bbox": [ + 125, + 249, + 501, + 536 + ], + "type": "image", + "image_path": "7e45008f997fe87d85da17fac14afbe181e1bd855d46089d2000d6d279f17c0b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 566, + 504, + 588 + ], + "lines": [ + { + "bbox": [ + 104, + 566, + 504, + 588 + ], + "spans": [ + { + "bbox": [ + 104, + 566, + 504, + 588 + ], + "type": "text", + "content": "Figure 7: OLMo 0424 7B per layer causality scores as a function of the choice of layer at which to fit the LRE." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 311, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 254, + 233, + 349, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 254, + 233, + 349, + 243 + ], + "spans": [ + { + "bbox": [ + 254, + 233, + 349, + 243 + ], + "type": "text", + "content": "Best Layer Beta vs. Faithfulness" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 116, + 248, + 501, + 541 + ], + "blocks": [ + { + "bbox": [ + 116, + 248, + 501, + 541 + ], + "lines": [ + { + "bbox": [ + 116, + 248, + 501, + 541 + ], + "spans": [ + { + "bbox": [ + 116, + 248, + 501, + 541 + ], + "type": "image", + "image_path": "834ebec1094b86ae7cfc91aa6d20e88a13cb123b46a91be46bb670f3f4ffa542.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 127, + 573, + 482, + 586 + ], + "lines": [ + { + "bbox": [ + 127, + 573, + 482, + 586 + ], + "spans": [ + { + "bbox": [ + 127, + 573, + 482, + 586 + ], + "type": "text", + "content": "Figure 8: OLMo 0424 7B LRE Beta hyperparameter sweep at highest performing layer." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 126, + 254, + 499, + 541 + ], + "blocks": [ + { + "bbox": [ + 262, + 239, + 348, + 247 + ], + "lines": [ + { + "bbox": [ + 262, + 239, + 348, + 247 + ], + "spans": [ + { + "bbox": [ + 262, + 239, + 348, + 247 + ], + "type": "text", + "content": "Best Layer Rank vs. 
Causality" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 126, + 254, + 499, + 541 + ], + "lines": [ + { + "bbox": [ + 126, + 254, + 499, + 541 + ], + "spans": [ + { + "bbox": [ + 126, + 254, + 499, + 541 + ], + "type": "image", + "image_path": "d48a6dd92df0c9e1200f947b2cd9997a53d627e698e84082717cc4c1719b0974.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 126, + 571, + 483, + 583 + ], + "lines": [ + { + "bbox": [ + 126, + 571, + 483, + 583 + ], + "spans": [ + { + "bbox": [ + 126, + 571, + 483, + 583 + ], + "type": "text", + "content": "Figure 9: OLMo 0424 7B LRE Rank hyperparameter sweep at highest performing layer." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 127, + 102, + 483, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 102, + 483, + 119 + ], + "spans": [ + { + "bbox": [ + 127, + 102, + 483, + 119 + ], + "type": "text", + "content": "WIMBD vs Batch Cooccurrence. 
slope=0.94, r=0.99" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 129, + 147, + 438, + 308 + ], + "blocks": [ + { + "bbox": [ + 129, + 147, + 438, + 308 + ], + "lines": [ + { + "bbox": [ + 129, + 147, + 438, + 308 + ], + "spans": [ + { + "bbox": [ + 129, + 147, + 438, + 308 + ], + "type": "image", + "image_path": "e0ce908e77a2e481e809b995710be04205bec67e9e7f5367e85b79837aa8831b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 230, + 330, + 380, + 345 + ], + "lines": [ + { + "bbox": [ + 230, + 330, + 380, + 345 + ], + "spans": [ + { + "bbox": [ + 230, + 330, + 380, + 345 + ], + "type": "text", + "content": "WIMBD Cooccurrence" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 105, + 419, + 301, + 610 + ], + "blocks": [ + { + "bbox": [ + 123, + 376, + 487, + 388 + ], + "lines": [ + { + "bbox": [ + 123, + 376, + 487, + 388 + ], + "spans": [ + { + "bbox": [ + 123, + 376, + 487, + 388 + ], + "type": "text", + "content": "Figure 10: Comparison between WIMBD and Batch Search subject-object co-occurrences" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 105, + 419, + 301, + 610 + ], + "lines": [ + { + "bbox": [ + 105, + 419, + 301, + 610 + ], + "spans": [ + { + "bbox": [ + 105, + 419, + 301, + 610 + ], + "type": "image", + "image_path": "c57e25852404ada77ca4a04964e77b1a84e1d9076ec8387246cef26b62d8f7fd.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 309, + 419, + 504, + 608 + ], + "blocks": [ + { + "bbox": [ + 309, + 419, + 504, + 608 + ], + "lines": [ + { + "bbox": [ + 309, + 419, + 504, + 608 + ], + "spans": [ + { + "bbox": [ + 309, + 419, + 504, + 608 + ], + "type": "image", + "image_path": "033cc96dfe8534d7d86dd0369b088e2fbd4344a80840dfa4285ec2df9c7c3d0f.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 620, + 504, + 666 + ], + "lines": [ + { + "bbox": [ + 104, + 620, + 504, + 666 + ], + "spans": [ + { + "bbox": [ + 104, + 620, + 504, + 666 + ], + "type": "text", + "content": "Figure 11: Correlations between each feature in our regression analysis. Because of the high correlation between faithfulness metrics, we use a single dimensional PCA to attain one feature that captures " + }, + { + "bbox": [ + 104, + 620, + 504, + 666 + ], + "type": "inline_equation", + "content": "89\\%" + }, + { + "bbox": [ + 104, + 620, + 504, + 666 + ], + "type": "text", + "content": " of the variance of both for the purposes of doing feature importance tests. Note that we zero out the diagonal (which has values of 1) for readability." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "reduce the faithfulness features to one feature for the purposes of this test. 
Correlations are shown in Figure 11" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 198, + 78, + 405, + 97 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 78, + 405, + 97 + ], + "spans": [ + { + "bbox": [ + 198, + 78, + 405, + 97 + ], + "type": "text", + "content": "Permutation Importances" + } + ] + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 106, + 118, + 504, + 291 + ], + "blocks": [ + { + "bbox": [ + 106, + 118, + 504, + 291 + ], + "lines": [ + { + "bbox": [ + 106, + 118, + 504, + 291 + ], + "spans": [ + { + "bbox": [ + 106, + 118, + 504, + 291 + ], + "type": "image", + "image_path": "685630e8cab5d7ffaecc4c03b74e95cadb0e2941465d30626d12961382cbfa32.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 302, + 504, + 326 + ], + "lines": [ + { + "bbox": [ + 104, + 302, + 504, + 326 + ], + "spans": [ + { + "bbox": [ + 104, + 302, + 504, + 326 + ], + "type": "text", + "content": "Figure 12: Hard causality is by far the most important feature for generalizing to new relations when predicting Object frequencies, causing a change in about " + }, + { + "bbox": [ + 104, + 302, + 504, + 326 + ], + "type": "inline_equation", + "content": "15\\%" + }, + { + "bbox": [ + 104, + 302, + 504, + 326 + ], + "type": "text", + "content": " accuracy." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 344, + 409, + 357 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 344, + 409, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 344, + 409, + 357 + ], + "type": "text", + "content": "F RELATIONSHIP BETWEEN CAUSALITY AND ACCURACY" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 369, + 504, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 369, + 504, + 403 + ], + "spans": [ + { + "bbox": [ + 104, + 369, + 504, + 403 + ], + "type": "text", + "content": "In this section, we provide more detail on the relationship between the formation of linear representations and accuracy on in-context learning tasks. Although the two are very highly correlated, we argue that accuracy and LRE formation are somewhat independent." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 407, + 466, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 407, + 466, + 421 + ], + "spans": [ + { + "bbox": [ + 105, + 407, + 466, + 421 + ], + "type": "text", + "content": "We show this relationship across training for OLMo-1B in Figure 13 and 7B in Figure 14." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 435, + 354, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 435, + 354, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 435, + 354, + 449 + ], + "type": "text", + "content": "G EXTENDING TO COMMONSENSE RELATIONS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 460, + 506, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 460, + 506, + 582 + ], + "spans": [ + { + "bbox": [ + 104, + 460, + 506, + 582 + ], + "type": "text", + "content": "Following Elsahar et al. (2018), we focus on factual relations because subject-object co-occurrences are shown to be a good proxy for mentions of the fact. For completeness, we consider 8 additional commonsense relations here. Results for OLMo-7B are shown in Figure 15. We show that frequency is correlated with causality score (.42) in these cases as well, but it is possible subject-object frequencies do not accurately track occurrences of the relation being mentioned. For example, in the \"task person type\" relation, the co-occurrence count of the subject \"researching history\" and the object \"historian\" does not convincingly describe all instances where the historian concept is defined during pretraining. Co-occurrences are perhaps more convincingly related to how a model learns that the outside of a coconut is brown, however (the fruit outside color relation). Therefore, we caution treating these under the same lens as the factual relations. Nevertheless, we believe these results are an interesting perspective on how a different relation family compares to factual relations." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 241, + 504, + 552 + ], + "blocks": [ + { + "bbox": [ + 241, + 228, + 369, + 239 + ], + "lines": [ + { + "bbox": [ + 241, + 228, + 369, + 239 + ], + "spans": [ + { + "bbox": [ + 241, + 228, + 369, + 239 + ], + "type": "text", + "content": "Zero Shot, 5 Shot, Causality: OLMo 1B" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 106, + 241, + 504, + 552 + ], + "lines": [ + { + "bbox": [ + 106, + 241, + 504, + 552 + ], + "spans": [ + { + "bbox": [ + 106, + 241, + 504, + 552 + ], + "type": "image", + "image_path": "aa073b9b312d72e727c34d9effd4ff74d2bc67d5454ba1aa1fb8ca90ff01e807.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 566, + 504, + 588 + ], + "lines": [ + { + "bbox": [ + 104, + 566, + 504, + 588 + ], + "spans": [ + { + "bbox": [ + 104, + 566, + 504, + 588 + ], + "type": "text", + "content": "Figure 13: Zero shot, 5-shot accuracies against causality for each relation across training time in OLMo-1B" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + 
"index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 105, + 127, + 504, + 437 + ], + "blocks": [ + { + "bbox": [ + 241, + 114, + 369, + 123 + ], + "lines": [ + { + "bbox": [ + 241, + 114, + 369, + 123 + ], + "spans": [ + { + "bbox": [ + 241, + 114, + 369, + 123 + ], + "type": "text", + "content": "Zero Shot, 5 Shot, Causality: OLMo 7B" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 105, + 127, + 504, + 437 + ], + "lines": [ + { + "bbox": [ + 105, + 127, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 105, + 127, + 504, + 437 + ], + "type": "image", + "image_path": "ca992b76e0489ff59dc80b449abc20f4339b2b2e88a80a74be1ef2bbeaf07d28.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 451, + 504, + 473 + ], + "lines": [ + { + "bbox": [ + 104, + 451, + 504, + 473 + ], + "spans": [ + { + "bbox": [ + 104, + 451, + 504, + 473 + ], + "type": "text", + "content": "Figure 14: Zero shot, 5-shot accuracies against causality for each relation across training time in OLMo-7B" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 111, + 559, + 504, + 680 + ], + "blocks": [ + { + "bbox": [ + 110, + 534, + 408, + 547 + ], + "lines": [ + { + "bbox": [ + 110, + 534, + 408, + 547 + ], + "spans": [ + { + "bbox": [ + 110, + 534, + 408, + 547 + ], + "type": "text", + "content": "OLMo-7B 0424 Development of Commonsense LREs over Training Time" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 111, + 559, + 504, + 680 + ], + "lines": [ + { + "bbox": [ + 111, + 559, + 504, + 680 + ], + "spans": [ + { + "bbox": [ + 111, + 559, + 504, + 680 + ], + "type": "image", + "image_path": "dd50d1b038103e2aab255d65d92dd375cfbbf37543fcd2ba126437888775be19.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 144, + 690, + 464, + 703 + ], + "lines": [ + { + "bbox": [ + 144, + 690, + 464, + 703 + ], + "spans": [ + { + "bbox": [ + 144, + 690, + 464, + 703 + ], + "type": "text", + "content": "Figure 15:Commonsense relations compared to pretraining time in OLMo-7B." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 293, + 38 + ], + "type": "text", + "content": "Published as a conference paper at ICLR 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12492/7b5fffe0-9106-451e-9d83-f8c53a7c05c0_content_list.json b/data/2025/2504_12xxx/2504.12492/7b5fffe0-9106-451e-9d83-f8c53a7c05c0_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..24aa3fca0e83bd21a29afe8e1d27fa3ca6be9be8 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/7b5fffe0-9106-451e-9d83-f8c53a7c05c0_content_list.json @@ -0,0 +1,2352 @@ +[ + { + "type": "text", + "text": "MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices", + "text_level": 1, + "bbox": [ + 112, + 101, + 883, + 148 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Vasco Xu", + "bbox": [ + 258, + 157, + 336, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Chicago", + "bbox": [ + 223, + 174, + 372, + 188 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Chicago, USA", + "bbox": [ + 251, + 189, + 344, + 204 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "vascoxu@uchicago.edu", + "bbox": [ + 217, + 205, + 377, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Henry Hoffmann", + "bbox": [ + 227, + 231, + 367, + 247 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Chicago", + "bbox": [ + 223, + 248, + 370, + 262 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Chicago, USA", + "bbox": [ + 250, + 263, + 344, + 277 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "hankhoffmann@cs.uchicago.edu", + "bbox": [ + 187, + 277, + 406, + 292 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Chenfeng Gao", + "bbox": [ + 640, + 157, + 759, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Northwestern University", + "bbox": [ + 616, + 174, + 785, + 188 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Evanston, USA", + "bbox": [ + 648, + 189, + 751, + 202 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "chenfenggao2029@u.northwestern.edu", + "bbox": [ + 568, + 204, + 831, + 219 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Karan Ahuja", + "bbox": [ + 648, + 231, + 751, + 247 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Northwestern University", + "bbox": [ + 614, + 247, + 784, + 262 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Evanston, USA", + "bbox": [ + 648, + 263, + 751, + 276 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "kahuja@northwestern.edu", + "bbox": [ + 609, + 277, + 790, + 292 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/21cb0fed5824f2b52eaead0f3b91c03397de145b60f7fc3fc56c8e8b6dc9ec69.jpg", + 
"image_caption": [ + "Figure 1: MobilePoser uses any subset of consumer mobile devices (phones, watches, earbuds) available to estimate full-body pose and global translation." + ], + "image_footnote": [], + "bbox": [ + 84, + 309, + 496, + 521 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/3c2eb974ddb24edaa9597fd4b66033fe86777991e12341a6a7db51af8002f3c2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 496, + 309, + 841, + 521 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 83, + 568, + 184, + 582 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "There has been a continued trend towards minimizing instrumentation for full-body motion capture, going from specialized rooms and equipment, to arrays of worn sensors and recently sparse inertial pose capture methods. However, as these techniques migrate towards lower-fidelity IMUs on ubiquitous commodity devices, like phones, watches, and earbuds, challenges arise including compromised online performance, temporal consistency, and loss of global translation due to sensor noise and drift. Addressing these challenges, we introduce MobilePoser, a real-time system for full-body pose and global translation estimation using any available subset of IMUs already present in these consumer devices. MobilePoser employs a multi-stage deep neural network for kinematic pose estimation followed by a physics-based motion optimizer, achieving state-of-the-art accuracy while remaining lightweight. We conclude", + "bbox": [ + 81, + 587, + 482, + 782 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "with a series of demonstrative applications to illustrate the unique potential of MobilePoser across a variety of fields, such as health and wellness, gaming, and indoor navigation to name a few.", + "bbox": [ + 513, + 570, + 913, + 613 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "CCS CONCEPTS", + "text_level": 1, + "bbox": [ + 514, + 625, + 653, + 638 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "- Human-centered computing $\\rightarrow$ Ubiquitous and mobile computing.", + "bbox": [ + 513, + 642, + 915, + 671 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "KEYWORDS", + "text_level": 1, + "bbox": [ + 514, + 681, + 620, + 696 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Motion capture, sensors, inertial measurement units, mobile devices", + "bbox": [ + 513, + 700, + 913, + 715 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ACM Reference Format:", + "text_level": 1, + "bbox": [ + 514, + 720, + 661, + 732 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Vasco Xu, Chenfeng Gao, Henry Hoffmann, and Karan Ahuja. 2024. MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices. In The 37th Annual ACM Symposium on User Interface Software and Technology (UIST '24), October 13–16, 2024, Pittsburgh, PA, USA. ACM, New York, NY, USA, 11 pages. https://doi.org/10.1145/3654777.3676461", + "bbox": [ + 513, + 733, + 915, + 808 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 514, + 821, + 687, + 835 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Full-body motion capture has numerous applications in gaming, fitness, and virtual and augmented reality (VR/AR), enabling immersive experiences and context-aware interactions. 
While vision-based approaches for 3D human pose estimation have shown great", + "bbox": [ + 511, + 840, + 915, + 897 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.12492v1 [cs.HC] 16 Apr 2025", + "bbox": [ + 22, + 263, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "Permission to make digital or hard copies of part or all of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for third-party components of this work must be honored. For all other uses, contact the owner/author(s).", + "bbox": [ + 81, + 800, + 482, + 852 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "UIST '24, October 13-16, 2024, Pittsburgh, PA, USA", + "bbox": [ + 84, + 853, + 316, + 863 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "© 2024 Copyright held by the owner/author(s).", + "bbox": [ + 84, + 864, + 303, + 875 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "ACM ISBN 979-8-4007-0628-8/24/10", + "bbox": [ + 84, + 875, + 253, + 883 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "https://doi.org/10.1145/3654777.3676461", + "bbox": [ + 84, + 883, + 272, + 895 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/696e3fe3b988e348657a5ae71dc830e60ea0483d72b1488924a444e669644195.jpg", + "image_caption": [ + "Figure 2: Real-time global pose estimation powered by MobilePoser: (A) Person with smartwatch (left wrist) waving their hands. (B) Person with smartwatch (left wrist) performing jumping jacks. (C) Person wearing a smartwatch (left wrist) and carrying a phone in their right pocket running." + ], + "image_footnote": [], + "bbox": [ + 84, + 103, + 338, + 361 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/0756f6e551704363b0a80d0018307791429a28468cb1415b01872fdd090bcd17.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 341, + 103, + 500, + 361 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/245ac487888e82f4f86eed6cb209cc3d96f68466dd6718a8c94d350761353cc9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 500, + 103, + 656, + 359 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/1b2a7df0cabf3fc0e25cb50f472835af79ab269436848ca26ae321703af8357f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 658, + 103, + 787, + 359 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/3169657663aa0b2619685cc0f22df97a38dbc5e3520318143006da927e02c2d9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 787, + 103, + 911, + 359 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "promise, they require subjects to be within the camera's field of view, limiting their practicability for mobile and on-the-go applications. In contrast, inertial measurement unit (IMU) based techniques offer an attractive alternative, enabling less intrusive and occlusion-free user digitization [3].", + "bbox": [ + 81, + 436, + 482, + 507 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Commercial systems such as Xsens [45] use up to 17 special-purpose sensors to provide highly accurate pose estimations. However, such approaches are intrusive, making them undesirable for everyday use. Consequently, there has been a trend towards minimizing instrumentation. 
Sparse inertial pose capture methods, such as TransPose [49] and DIP [14], use 6 IMUs to achieve a balance between accuracy and practicality. Yet, these methods still require expensive and special-purpose IMUs attached to specific body joints. To enable full-body motion tracking without any external infrastructure, IMUPoser [28] leverages IMUs in devices we already carry around with us, namely smartphones, smartwatches, and earbuds. These commodity devices, however, use lower-fidelity IMUs, which compromises online performance, temporal consistency, and global translation estimation.", + "bbox": [ + 81, + 507, + 482, + 699 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we present MobilePoser, a real-time user digitization technique that tracks both poses and global movement (referred to as translation) using consumer devices (Figure 1) such as watches, phones and earbuds. To enable on-the-go motion tracking without any external infrastructure, we must address a set of unique challenges. First, the number of instrumented points is dynamically changing and sparse (at most three devices, with as few as one), making the problem highly under-constrained. Second, IMUs do not directly measure positional data, making global translation tracking non-trivial. Additionally, noise and drift from the low-cost IMUs found in commodity devices complicates pose and translation", + "bbox": [ + 81, + 700, + 482, + 853 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "estimation. Finally, such a system should operate directly on-device for real-time use, anywhere, anytime.", + "bbox": [ + 513, + 436, + 911, + 464 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "MobilePoser tackles these challenges by employing a multi-stage approach. For pose estimation, it utilizes a deep neural network (DNN) to predict full-body pose from the available IMU data, followed by a physics-based optimization step to ensure spatiotemporal consistency and plausible kinematics. This greatly helps resolve ambiguous instrumented joint motion profiles, such as differentiating between waving (Figure 2 A) versus jumping jacks (Figure 2 B) from only a single smartwatch on the wrist. To aid in generalizability, the model is trained on a large dataset of synthesized IMU measurements generated from high-quality motion capture (MoCap) data. For global translation estimation, MobilePoser employs a hybrid approach that fuses predictions from a foot contact-based method and a DNN-based method that directly regresses the root joint velocity. This combination enables accurate and robust translation estimation, even in challenging scenarios where both feet are in motion together (Figure 2 C). 
Importantly, MobilePoser is optimized to run on-device, achieving real-time performance of 60 frames per second on a smartphone (iPhone 15 Pro), making it suitable for mobile applications.", + "bbox": [ + 511, + 465, + 913, + 728 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, MobilePoser makes the following key contributions:", + "bbox": [ + 529, + 729, + 913, + 742 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1) It presents a novel framework for inertial translation estimation using consumer devices, enabling accurate tracking of global movement without specialized hardware.", + "(2) It achieves state-of-the-art full-body pose estimation across various on-body configurations of commodity IMU devices, demonstrating robust performance with as few as one and up to three wearable devices.", + "(3) It provides an open-source implementation that runs in real-time on edge devices, making it accessible and practical for widespread use." + ], + "bbox": [ + 532, + 757, + 915, + 895 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "UIST '24, October 13-16, 2024, Pittsburgh, PA, USA", + "bbox": [ + 84, + 75, + 326, + 85 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Xu, et al.", + "bbox": [ + 867, + 75, + 911, + 85 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "1Note, we count the left and right earbuds as a unified single IMU stream", + "bbox": [ + 84, + 883, + 428, + 895 + ], + "page_idx": 1 + }, + { + "type": "table", + "img_path": "images/b3048a04b939af7f80f294f9a87a4c591d20d22ace67ce0015d8db97f8dd61f1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td>System</td><td># Inst. Joints</td><td>FPS</td><td>Consumer Device</td><td>Translation</td><td>MPJVE (cm)</td><td>Jitter (10² m/s³)</td></tr><tr><td>Xsens [45]</td><td>17</td><td>120</td><td>×</td><td>✓</td><td>-</td><td>-</td></tr><tr><td>SIP [43]</td><td>6</td><td>60</td><td>×</td><td>✓</td><td>7.7</td><td>3.8</td></tr><tr><td>DIP [14]</td><td>6</td><td>29</td><td>×</td><td>×</td><td>8.9</td><td>30.13</td></tr><tr><td>TransPose [49]</td><td>6</td><td>90</td><td>×</td><td>✓</td><td>7.1</td><td>1.4</td></tr><tr><td>PIP [48]</td><td>6</td><td>60</td><td>×</td><td>✓</td><td>5.9</td><td>0.24</td></tr><tr><td>IMUPoser [28]</td><td>1-3</td><td>25</td><td>✓</td><td>×</td><td>12.1</td><td>1.9</td></tr><tr><td>MobilePoser (our work)</td><td>1-3</td><td>60</td><td>✓</td><td>✓</td><td>10.6</td><td>0.97</td></tr></table>
", + "bbox": [ + 150, + 102, + 848, + 218 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Table 1: Comparison with key prior work on the DIP-IMU dataset.", + "bbox": [ + 276, + 218, + 718, + 231 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 RELATED WORK", + "text_level": 1, + "bbox": [ + 81, + 266, + 259, + 281 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 User Digitization with External Sensors", + "text_level": 1, + "bbox": [ + 81, + 287, + 447, + 303 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Commercial motion capture systems such as OptiTrack [29] and Vicon [41] use specialized hardware, such as multiple calibrated high-speed infrared cameras, to track retroreflective markers attached to a user's body. Such setups are commonly used in games, movies and character animations that require millimeter accuracy and are the gold standard of motion capture. The expensive infrastructure required by commercial systems, makes them impractical for everyday use. Therefore, much research has been devoted to instrumentation-free approaches using monocular cameras. Such approaches generally rely on RGB [9, 13, 36] or depth [27] cameras based computer vision techniques to predict body pose.", + "bbox": [ + 81, + 304, + 482, + 458 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "There also exists specialized external hardware for pose tracking in Extended Reality (XR). For example, the HTC Vive [2], PlayStation VR [1] and Oculus Rift [32] track the head, handheld controllers and other limb-borne accessories using external sensor base stations for Virtual Reality (VR) applications. The un-sensed joints are estimated with inverse kinematics [15] or learning-based methods [16, 35]. Other non-optical external approaches for pose estimation include capacitive sensing [50], magnetic fields [31, 33], RF [51], and mechanical linkages [39].", + "bbox": [ + 81, + 458, + 482, + 583 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 User Digitization with non-IMU Worn Sensors", + "text_level": 1, + "bbox": [ + 81, + 611, + 436, + 642 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Wearable sensors provide a portable and flexible alternative to external sensors. For example, MI-Poser [7] uses magnetic tracking in wristbands and AR glasses to estimate upper-body poses. Other works have explored wrist-worn cameras [20, 44], EMG sensors [24], EIT sensors [22], wrist-worn antennas [19] and depth sensor armbands [10]. However, these works focus solely on capturing the motion of specific body parts (e.g., wrist or upper-body).", + "bbox": [ + 81, + 646, + 482, + 743 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To capture full-body motion, a popular approach is to use body-mounted cameras coupled with computer vision techniques [5, 38]. Other works have explored different sensor technologies such as ultrasonic sensors [42] and RFID [18]. Nevertheless, these works require users to wear sensors they do not already have. Pose-On-The-Go [4] addresses this by estimating full-body pose via extreme sensor fusion, leveraging a phone's front and rear cameras, thus requiring no special instrumentation. However, its computationally expensive and relies heavily on heuristics to power body poses, often resulting in unnatural motions. 
MobilePoser differentiates itself by focusing on full-body pose estimation using power-efficient", + "bbox": [ + 81, + 744, + 482, + 896 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "IMUs already found in consumer devices, such as smartphones, smartwatches, and earbuds.", + "bbox": [ + 513, + 268, + 913, + 295 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3 User Digitization with IMU Worn Sensors", + "text_level": 1, + "bbox": [ + 513, + 306, + 893, + 323 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Commercial motion capture systems, such as Xsens [45], use a large number of inertial sensors (typically 17) strapped to the body to provide high-quality motion capture. These setups consist of homogeneous, high-grade IMUs that are calibrated for noise and have known positions on the body, resulting in a less ill-posed problem compared to using sparse, heterogeneous sensors. However, such an approach is highly inconvenient and intrusive for everyday use.", + "bbox": [ + 511, + 325, + 913, + 422 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To address this limitation, researchers have explored reconstructing human motions from a reduced number of sensors. Works such as SIP [43], DIP [14], PIP [48], TIP [17], and TransPose [49] have demonstrated the feasibility of using only 6 commercial-grade Xsens IMU sensors for full-body motion capture. Works have further explored integrating other input modalities (e.g. UWB [8] and egocentric images [47]) in addition to the 6 IMUs for increased performance. All these approaches leverage the homogeneity and known calibrated positions of the sensors to achieve accurate pose estimation. However, even 6 sensors can be cumbersome for on-the-go applications, especially those that require passive sensing.", + "bbox": [ + 511, + 422, + 913, + 575 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recent research has investigated even sparser IMU configurations using commodity devices. IMUPoser [28], which is most closely related to our work, performs pose estimation using any combination of smartphone, smartwatch, and earbuds. While IMUPoser tackles the challenges of heterogeneous sensor quality for pose estimation, it lacks global translation due to IMU noise and drift, and contains unrealistic spatio-temporal motion artifacts. Additionally, IMUPoser runs on a laptop at $25\\mathrm{Hz}$ , limiting its practicality for real-time mobile applications.", + "bbox": [ + 511, + 575, + 913, + 700 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In contrast, MobilePoser addresses these limitations by demonstrating improved pose estimation accuracy on widely used benchmarks while also estimating global translation (see Table 1). Furthermore, our system is designed to run fully on-device, achieving real-time performance of 60 fps on edge mobile devices. This enables MobilePoser to provide a more practical and accessible solution for on-the-go motion capture using commodity devices.", + "bbox": [ + 511, + 699, + 913, + 797 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 MOBILEPOSER", + "text_level": 1, + "bbox": [ + 513, + 806, + 671, + 821 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Estimating a user's full-body pose from a sparse set of IMU observations is a severely under-constrained problem as it aims to infer a high-dimensional quantity, i.e., the full-body pose, from low-dimensional observations that only capture partial motion at each instrumented point. 
Moreover, multiple possible solutions could", + "bbox": [ + 511, + 825, + 913, + 896 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices", + "bbox": [ + 83, + 75, + 648, + 85 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "UIST'24,October 13-16,2024,Pittsburgh,PA,USA", + "bbox": [ + 669, + 75, + 913, + 87 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/82d7e3785f82d268bc65de4155687f47ebd6e5f7505f1700e0b66ace8a9d59c2.jpg", + "image_caption": [ + "Figure 3: MobilePoser system overview. MobilePoser accepts any available subset of IMU data from the user and masks absent devices by setting their values to zero. The IMU data is then fed into two main modules: (1) Pose Estimation, which first estimates joint positions followed by joint rotations, and (2) Translation Estimation, which combines foot-ground contact probabilities with a direct neural network-based approach to regress global velocity. Finally, a Physics Optimizer refines the predicted joint rotations and global translation to ensure they satisfy physical constraints." + ], + "image_footnote": [], + "bbox": [ + 101, + 108, + 883, + 308 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "explain the observed data, making it challenging to determine the correct pose. To tackle these challenges, we introduce MobilePoser, a system that leverages data-driven learning and physics-based optimization to estimate accurate and plausible full-body poses and global translations from sparse IMU inputs. Figure 3 provides an overview of our pipeline, which we describe in detail in the following sections.", + "bbox": [ + 81, + 419, + 483, + 518 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 System Input", + "text_level": 1, + "bbox": [ + 83, + 545, + 236, + 560 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "MobilePoser takes as input acceleration and orientation readings from IMUs across any subset of three consumer devices: smartphones, smartwatches, and earbuds. Each of these devices can be placed at different body locations, resulting in various possible combinations. For instance, a smartphone can be stored in the left or right pocket, held in the left or right hand, placed next to the head during a call, or not carried by the user at all. Similarly, smartwatches can be worn on either wrist or not worn at all, while earbuds can be worn, placed in a charging case stored in either pocket, or not carried by the user.", + "bbox": [ + 81, + 563, + 482, + 700 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Following IMUPoser [28], we consider 24 plausible device-location combinations across five body locations: right pocket, left pocket, right wrist, left wrist, and head. These combinations cover the various ways users might carry or wear their devices throughout the day. Regardless of the input device combination, our model expects IMU data from the five predefined body locations.", + "bbox": [ + 81, + 700, + 483, + 784 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The IMU signal at each location consists of acceleration (3 values) and orientation (a $3 \\times 3$ rotation matrix), resulting in a total of 12 IMU values per location. Across all five locations, this yields an input vector $x \\in \\mathbb{R}^{60}$ . However, since at any given time only a subset of 1-3 devices may be present, data from absent devices is masked and set to zero. 
This masking approach allows us to build a unified model that can handle the varying number of available devices and their changing on-body location seamlessly. This further eliminates", + "bbox": [ + 81, + 785, + 483, + 896 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "the need for training separate models for each possible combination, making the system more practical and efficient.", + "bbox": [ + 513, + 419, + 915, + 446 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 Full-Body Pose Estimation", + "text_level": 1, + "bbox": [ + 514, + 459, + 774, + 474 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To learn a mapping from IMU input to full-body pose, we employ a data-driven, multi-stage neural network approach. Specifically, our pose estimation network consists of two submodules: Joint predictor $(\mathcal{F}^{joint})$ and Rotation predictor $(\mathcal{F}^{\theta})$ . More specifically, $\mathcal{F}^{joint}$ estimates joint positions as an intermediate task and $\mathcal{F}^{\theta}$ solves for the joint angle orientations. Both submodules use a bidirectional LSTM (bi-LSTM) to model both spatial and temporal information [14]. We input data into both submodules in a sliding-window fashion with window length $N$ .", + "bbox": [ + 511, + 478, + 915, + 604 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.1 Joint Pose Estimation $(\mathcal{F}^{joint})$ . This module estimates the joint positions from a sequence of IMU measurements. We explicitly estimate joint positions as an intermediate step, as it helps extract useful information from linear accelerations due to its linear correlation with joint positions [49]. The input to $\mathcal{F}^{joint}$ is $x^{imu}(t) = [x_{t-N}, \ldots, x_t]$ , where $t$ is the current time step and $N$ is the time window length. The outputs are the root (pelvis) relative 3D positions of the 24 SMPL body joints [25] $\pmb{p}(t) = [\pmb{p}_{t-N}, \ldots, \pmb{p}_t] \in \mathbb{R}^{N \times 72}$ . The loss function used to train this network is:", + "bbox": [ + 513, + 611, + 915, + 736 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{L}_{\\text{joint}} = \\left\\| \\mathbf{p} - \\mathbf{p}_{GT} \\right\\|_{2}^{2} \\tag{1}\n$$\n", + "text_format": "latex", + "bbox": [ + 647, + 741, + 911, + 758 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where the subscript $GT$ denotes the ground truth and $p$ represents the full-body SMPL joint positions.", + "bbox": [ + 513, + 762, + 913, + 790 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.2 Joint Rotation and Body Mesh Estimation $(\mathcal{F}^{\theta})$ . Here we employ a neural kinematic estimator to regress joint rotations from the previously estimated positions. We concatenate the joint coordinates from $\mathcal{F}^{joint}$ with IMU measurements, which serves as the input to $\mathcal{F}^{\theta}$ . 
Note that while the SMPL body encodes 24 joints, only 18 are relevant from a rotation prediction perspective, as the fingers, wrists and toes are independent of the on-body IMUs and",
    "bbox": [
      511,
      797,
      915,
      896
    ],
    "page_idx": 3
  },
  {
    "type": "header",
    "text": "UIST '24, October 13-16, 2024, Pittsburgh, PA, USA",
    "bbox": [
      84,
      75,
      326,
      85
    ],
    "page_idx": 3
  },
  {
    "type": "header",
    "text": "Xu, et al.",
    "bbox": [
      867,
      75,
      911,
      85
    ],
    "page_idx": 3
  },
  {
    "type": "text",
    "text": "are hence set to identity rotation matrices [49]. The outputs of the network are the 18 root-relative joint orientations represented as 6D rotations: $\pmb{\theta}(t) = [\pmb{\theta}_{t-N},\dots,\pmb{\theta}_t] \in \mathbb{R}^{N \times 108}$ .",
    "bbox": [
      81,
      106,
      480,
      148
    ],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "Our joint rotation loss consists of three terms: $\mathcal{L}_{ori}$ , $\mathcal{L}_{pos}$ , $\mathcal{L}_{jerk}$ . The loss term $\mathcal{L}_{ori}$ is a standard L2 loss from the ground truth joint rotations. The term $\mathcal{L}_{pos}$ penalizes error accumulating along the kinematic chain. Finally, $\mathcal{L}_{jerk}$ promotes temporally smooth predictions, where $jerk(\theta) = \theta_t - 3\theta_{t-1} + 3\theta_{t-2} - \theta_{t-3}$ is the third-order finite difference that computes the jerk of a signal $\theta$ at time step $t$ , penalizing the deviation between neighboring frames [49].",
    "bbox": [
      81,
      148,
      482,
      244
    ],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "Our combined joint rotation loss function can be represented as:",
    "bbox": [
      98,
      244,
      482,
      258
    ],
    "page_idx": 4
  },
  {
    "type": "equation",
    "text": "\n$$\n\mathcal{L}_{\theta} = \mathcal{L}_{\text{ori}} + \mathcal{L}_{\text{pos}} + \lambda \mathcal{L}_{\text{jerk}} \tag{2}\n$$\n",
    "text_format": "latex",
    "bbox": [
      204,
      263,
      482,
      281
    ],
    "page_idx": 4
  },
  {
    "type": "equation",
    "text": "\n$$\n\mathcal{L}_{\text{ori}} = \left\| \theta - \theta_{GT} \right\|_{2}^{2} \tag{3}\n$$\n",
    "text_format": "latex",
    "bbox": [
      196,
      282,
      482,
      301
    ],
    "page_idx": 4
  },
  {
    "type": "equation",
    "text": "\n$$\n\mathcal{L}_{\text{pos}} = \left\| \mathrm{FK}(\theta) - \mathbf{p}_{GT} \right\|_{2}^{2} \tag{4}\n$$\n",
    "text_format": "latex",
    "bbox": [
      192,
      303,
      482,
      320
    ],
    "page_idx": 4
  },
  {
    "type": "equation",
    "text": "\n$$\n\mathcal{L}_{\text{jerk}} = \sum_{t}^{T} jerk(\theta) \tag{5}\n$$\n",
    "text_format": "latex",
    "bbox": [
      187,
      323,
      482,
      359
    ],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "where $\mathrm{FK}(\cdot)$ is the forward kinematics function that computes joint coordinates from joint rotations. Given the joint rotations, the parametric SMPL body model generates a corresponding body mesh with 6890 vertices.",
    "bbox": [
      81,
      364,
      482,
      420
    ],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "3.3 Global Translation Estimation",
    "text_level": 1,
    "bbox": [
      83,
      431,
      375,
      446
    ],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "Translation estimation from IMUs is challenging as they lack direct distance measurements. Moreover, IMUs are prone to noise and biases, which causes techniques such as double-integration of acceleration to rapidly accumulate errors [46]. 
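A quick numeric experiment makes this drift problem concrete: a small constant bias, once double-integrated, grows quadratically in time. The bias and noise magnitudes below are illustrative stand-ins for low-cost IMU error, not measured values from the paper:

```python
import numpy as np

rng = np.random.default_rng(0)
dt, seconds = 1 / 60, 10.0                 # 60 Hz stream for 10 s
n = int(seconds / dt)

true_acc = np.zeros(n)                     # the sensor is actually stationary
bias, noise = 0.05, 0.1                    # m/s^2, illustrative error magnitudes
meas_acc = true_acc + bias + noise * rng.standard_normal(n)

# Naive dead reckoning: integrate acceleration twice.
vel = np.cumsum(meas_acc) * dt
pos = np.cumsum(vel) * dt

# The constant bias alone contributes 0.5 * bias * t^2 = 2.5 m of drift here.
print(f"position error after {seconds:.0f} s: {abs(pos[-1]):.2f} m")
```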
Therefore, inspired by prior work [23, 48, 49], we estimate per-frame velocity of the root joint using two submodules: a foot-ground contact based estimator $(v_{f})$ and a neural network based root velocity estimator $(v_{e})$ . We fuse the outputs of the two submodules to obtain a final estimate of global translation.",
    "bbox": [
      81,
      450,
      482,
      575
    ],
    "page_idx": 4
  },
  {
    "type": "list",
    "sub_type": "text",
    "list_items": [
      "3.3.1 Foot-Ground Contact based Root Velocity $(v_{f})$ . Here we estimate the probability of each foot contacting the ground independently using a bi-LSTM network. The input to the model is the concatenated vector of joint positions and IMU measurements. The output of the network is the likelihood that each foot is contacting the ground, denoted as $c_{foot} = [c_{lfoot}, c_{rfoot}] \in \mathbb{R}^2$ . The foot with the higher foot-ground contact probability is defined as the supporting foot, $s = \max \{c_{\mathrm{lfoot}}, c_{\mathrm{rfoot}}\}$ . The root velocity, $v_{f}(t) \in \mathbb{R}^{3}$ , is then computed as the coordinate difference of the supporting foot between consecutive frames. This approach helps capture natural body motions, as movement is significantly influenced by the supporting foot's dynamics [37]. For example, when walking, the body's movement is propelled forward and stabilized by the foot contacting the ground. The network is trained using binary cross-entropy loss.",
      "3.3.2 Neural Network based Root Velocity $(v_{e})$ . While the supporting foot contact based method yields plausible human movement, it inherently fails when both feet are not contacting the ground (e.g., when running or jumping). To accommodate such cases, we estimate per-frame root velocity directly using a neural network. We again use the predicted joint coordinates and IMU measurements as input. Compared to previous submodules that use a bi-LSTM for"
    ],
    "bbox": [
      81,
      583,
      482,
      896
    ],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "prediction, this module uses a unidirectional LSTM due to its capacity to capture longer historical context. The output is per-frame root velocity, denoted as $v_{e}(t) \in \mathbb{R}^{3}$ . The network is trained using a cumulative L2 loss [49].",
    "bbox": [
      511,
      106,
      915,
      162
    ],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "3.3.3 Module Fusion. Both modules offer different trade-offs in terms of predicting translation. The supporting-foot method provides more realistic estimates by leveraging human kinematics but fails when both feet are off the ground. On the other hand, directly estimating root velocity is more general but is highly prone to unnatural movements such as foot sliding [52]. To achieve the benefits of both, we adopt a heuristic-based fusion approach, inspired by TransPose [49]. In summary, when the foot contact probability $c$ is higher than an upper threshold $\overline{q}$ , we are confident of ground contact by a foot and hence we rely on $(v_{f})$ for translation estimation. When the foot contact probability is below a lower threshold $\underline{q}$ , we rely on $(v_{e})$ . 
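The selection-plus-blending logic just described can be sketched as follows; this anticipates the linear interpolation of Eq. (6) given next, and the threshold values 0.5 and 0.9 are the ones reported just below (the function name is illustrative):

```python
import numpy as np

Q_LO, Q_HI = 0.5, 0.9   # lower / upper contact-probability thresholds

def fuse_velocity(c: float, v_f: np.ndarray, v_e: np.ndarray) -> np.ndarray:
    """Blend the two root-velocity estimates using the supporting foot's
    contact probability c, following the heuristic fusion rule."""
    if c >= Q_HI:          # confident ground contact: trust the kinematic estimate
        return v_f
    if c <= Q_LO:          # feet likely airborne: trust the learned estimate
        return v_e
    w = (c - Q_LO) / (Q_HI - Q_LO)   # linear blend in between, as in Eq. (6)
    return (1 - w) * v_e + w * v_f

v_f = np.array([0.0, 0.0, 1.2])   # from supporting-foot displacement
v_e = np.array([0.1, 0.0, 1.0])   # from the velocity network
print(fuse_velocity(0.7, v_f, v_e))   # halfway between the two estimates
```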
For intermediate probabilities, we fuse both velocity estimates using a weighted sum to output the final global velocity estimate $v$ :",
    "bbox": [
      513,
      170,
      915,
      352
    ],
    "page_idx": 4
  },
  {
    "type": "equation",
    "text": "\n$$\nv = \frac{c - \bar{q}}{\underline{q} - \bar{q}} v_{e} + \frac{c - \underline{q}}{\bar{q} - \underline{q}} v_{f} \tag{6}\n$$\n",
    "text_format": "latex",
    "bbox": [
      642,
      357,
      913,
      390
    ],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "Following previous work [49], we use $\underline{q} = 0.5$ and $\overline{q} = 0.9$ .",
    "bbox": [
      513,
      397,
      864,
      412
    ],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "3.4 Physics-Aware Refinement",
    "text_level": 1,
    "bbox": [
      514,
      425,
      779,
      440
    ],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "Our pose and translation estimation networks output the user's global pose based on a history of IMU measurements. When trained on sufficiently large amounts of data, the full-body pose estimation and global translation estimation neural networks learn the human motion manifold and produce realistic poses. However, despite the best modeling efforts, the outputs may still contain inter-mesh penetration and temporal artifacts such as jitter, foot-floor penetration and foot skating. To address these issues, we add an off-the-shelf physics motion optimizer [48]. The physics optimizer uses two proportional derivative (PD) controllers to compute the desired acceleration of the simulated character that best reproduces the estimated pose while satisfying physical constraints, such as the equation of motion [12]. The inputs to the physics optimizer are the estimated joint angles $\theta$ , the foot-ground contact probabilities $c_{foot}$ , and the neural network based root velocity $v_{e}$ . The outputs are the optimized joint angles and global translation with reduced jitter and foot-ground penetration (Figure 4). For a detailed overview of the physics optimizer, we refer readers to PIP [48].",
    "bbox": [
      511,
      443,
      913,
      693
    ],
    "page_idx": 4
  },
  {
    "type": "image",
    "img_path": "images/a5fdf6041545d3658902b169cb999bb2423047dfed789e142ee272c001e87cc9.jpg",
    "image_caption": [
      "Figure 4: Demonstration of the physics optimizer's ability to reduce foot-ground penetration."
    ],
    "image_footnote": [],
    "bbox": [
      517,
      715,
      913,
      851
    ],
    "page_idx": 4
  },
  {
    "type": "header",
    "text": "MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices",
    "bbox": [
      83,
      75,
      648,
      85
    ],
    "page_idx": 4
  },
  {
    "type": "header",
    "text": "UIST '24, October 13-16, 2024, Pittsburgh, PA, USA",
    "bbox": [
      669,
      75,
      913,
      87
    ],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "3.5 Real-time Inference",
    "text_level": 1,
    "bbox": [
      81,
      104,
      290,
      119
    ],
    "page_idx": 5
  },
  {
    "type": "text",
    "text": "We implement proof-of-concept applications in iOS, using an Apple iPhone 15 Pro, Apple Watch Series 9 and Apple AirPods Pro. The iPhone, Apple Watch and AirPods sample IMU data at 60, 60 and $25\mathrm{Hz}$ respectively. 
For uniformity, we convert all the IMU data to $60\\mathrm{Hz}$ by upsampling the AirPods.", + "bbox": [ + 81, + 125, + 480, + 193 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We employ the active device selection strategy proposed by IMUPoser [28], wherein the UWB and inertial data is used to track the active devices and their on-body locations. For initial prototyping, the Apple Watch and AirPods communicate over Bluetooth to the iPhone, which streams data to a MacBook Air 2022 via socket. Post connection, a small calibration step is performed to align the IMU measurements with the training data, similar to prior work [14, 28, 49]. Following the setup, data is streamed to the laptop for pre-processing, inference and then relayed to Unity applications for visualization.", + "bbox": [ + 81, + 193, + 480, + 330 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To further prototype an on-device edge model, we convert our trained PyTorch model into CoreML with mixed precision quantization and evaluate its performance. On an iPhone 15 Pro, our model incurs $\\sim 14\\mathrm{ms}$ model inference time running at $60\\mathrm{Hz}$ , capped by input IMU sampling rate.", + "bbox": [ + 81, + 330, + 480, + 402 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 DATA SYNTHESIS AND MODEL TRAINING", + "text_level": 1, + "bbox": [ + 81, + 412, + 470, + 426 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Model training requires a large collection of synchronized IMU measurements and corresponding SMPL body poses. We leverage the AMASS [26] MoCap dataset, which provides an extensive collection of such data(~40 hours), including translation.", + "bbox": [ + 81, + 431, + 482, + 487 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 Full-Body Pose Estimation", + "text_level": 1, + "bbox": [ + 81, + 498, + 344, + 513 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Our models expect IMU measurements as input. We synthesize IMU data following the approach proposed in DIP [14]. In summary, we place virtual sensors on the corresponding SMPL mesh vertices (left and right wrists, left and right pockets, and the head) and obtain joint rotations via limb orientations, while acceleration values are computed using finite differences. During training, we scale down the acceleration by a factor of $30m / s^2$ , such that its values are on a similar scale to orientations, for better learning. Of note, we do not normalize our IMU measurements to a root joint (e.g., the pelvis), as the number of available devices can vary.", + "bbox": [ + 81, + 516, + 482, + 655 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 Global Translation Estimation", + "text_level": 1, + "bbox": [ + 81, + 667, + 375, + 681 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The translation estimation networks require (1) binary labels for foot-ground contact states and (2) per-frame root velocity values. To generate foot-ground contact states, we assume that a foot in contact with the ground displays very little movement between frames. Therefore, when the movement of one foot between consecutive frames is less than a threshold $u$ , then we consider it to be contacting the ground. We set $u = 0.008$ , following previous work [49]. To train $v_{e}$ , we require per-frame root velocities. 
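For concreteness, here is a small sketch of the contact-label synthesis just described; the function name and array layout are illustrative, and the per-frame root velocities mentioned above are derived next in the text:

```python
import numpy as np

U = 0.008  # per-frame foot displacement threshold in meters (Sec. 4.2)

def contact_labels(foot_pos: np.ndarray) -> np.ndarray:
    """Binary foot-ground contact labels from per-frame foot positions.

    foot_pos: (T, 2, 3) array of left/right foot positions, e.g. taken from
    the AMASS ground-truth meshes. A foot that moves less than U between
    consecutive frames is labeled as in contact with the ground.
    """
    disp = np.linalg.norm(np.diff(foot_pos, axis=0), axis=-1)  # (T-1, 2)
    labels = (disp < U).astype(np.float32)
    return np.concatenate([labels[:1], labels])  # repeat first frame -> (T, 2)

# Example: a stationary left foot and a sliding right foot.
T = 5
feet = np.zeros((T, 2, 3))
feet[:, 1, 0] = np.linspace(0.0, 0.5, T)  # right foot moves 12.5 cm per frame
print(contact_labels(feet))  # left column all 1s, right column 0s after frame 0
```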
Since the AMASS dataset provides root position data, we can compute root velocities as the coordinate difference of the root position between consecutive frames.", + "bbox": [ + 81, + 685, + 482, + 837 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3 Training Setup and Procedure", + "text_level": 1, + "bbox": [ + 81, + 849, + 370, + 864 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We train our models on a NVIDIA A40 GPU, which takes roughly a day for all modules and device-combinations. In total, our model has", + "bbox": [ + 81, + 867, + 480, + 896 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "$\\sim 6.7M$ trainable parameters. Each module is trained separately using a batch size of 256 and the Adam optimizer [21] with a learning rate of $\\mathrm{lr} = 10^{-3}$ for 80 epochs. We also apply a gradient clipping with norm of 1, to prevent the gradients from exploding.", + "bbox": [ + 511, + 106, + 913, + 162 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "During training of $\\mathcal{F}^{\\theta}$ , $v_{e}$ , and $v_{f}$ , we add Gaussian noise with $\\sigma = 0.04$ to the joint positions to prevent overfitting and deal with prediction errors from $\\mathcal{F}^{joint}$ . We empirically set $\\lambda = 10^{-5}$ when training $\\mathcal{F}^{\\theta}$ , to encourage temporally smooth predictions.", + "bbox": [ + 511, + 162, + 913, + 219 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5 EVALUATION", + "text_level": 1, + "bbox": [ + 513, + 231, + 660, + 244 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We systematically isolate and analyze the efficacy of MobilePoser across different datasets, evaluation metrics and protocols. We show both qualitative and quantitative results, and also run ablation studies to evaluate our translation estimation design choices.", + "bbox": [ + 511, + 250, + 913, + 306 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1 Datasets", + "text_level": 1, + "bbox": [ + 514, + 318, + 627, + 332 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We evaluate MobilePoser on three real-world, inertial datasets, summarized in Table 2:", + "bbox": [ + 513, + 335, + 913, + 364 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- DIP-IMU [14] contains data from 10 participants, collected using commercial-grade Xsens [45] IMUs at $60\\mathrm{Hz}$ . It includes a rich variety of activities such as arm raises, stretches, lunges, squats, and punches. However, DIP-IMU does not contain global translation data.", + "- TotalCapture [40] provides real IMU measurements with ground-truth pose and translation, captured using commercial Xsens IMUs at $60\\mathrm{Hz}$ . Following PIP [48], we re-calibrate the acceleration measurements to account for constant bias.", + "- IMUPoser [28] is collected from 10 participants using consumer-grade devices: an iPhone 11 Pro, Apple Watch Series 6, and AirPods, at $25\\mathrm{Hz}$ . It provides ground-truth pose and global translation data." + ], + "bbox": [ + 540, + 368, + 921, + 546 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2 Full-Body Pose Estimation", + "text_level": 1, + "bbox": [ + 514, + 564, + 774, + 580 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2.1 Evaluation Metrics. 
Like prior work, we use the following evaluation metrics for pose estimation (lower is better for all):", + "bbox": [ + 513, + 583, + 913, + 611 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Mean Per Joint Rotation Error (MPJRE): Measure of mean angular error across all root aligned joints in degrees $(^{\\circ})$ .", + "- Mean Per Joint Position Error (MPJPE): Measure of mean Euclidean distance error across all root aligned joints in centimeters (cm).", + "- Mean Per Joint Vertex Error (MPJVE): Measure of mean Euclidean distance error across all root aligned vertices of the SMPL body mesh in centimeters (cm).", + "- Mean Per Joint Jitter (Jitter): Measure of mean jerk across all body joints of the predicted motion in $m / s^3$ ." + ], + "bbox": [ + 540, + 614, + 913, + 752 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We use MPJVE as our primary metric of evaluation for ease of comparison with prior work [28].", + "bbox": [ + 513, + 755, + 913, + 784 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/99ed87bf7a0cfd7d8516898be6c0c8c9a3be2623360a9c68576d7a576fcbccca.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td>Dataset</td><td>Capture Device</td><td>Translation</td><td>Data FPS</td></tr><tr><td>DIP-IMU</td><td>Commercial</td><td>×</td><td>60 Hz</td></tr><tr><td>TotalCapture</td><td>Commercial</td><td>✓</td><td>60 Hz</td></tr><tr><td>IMUPoser</td><td>Consumer</td><td>✓</td><td>25 Hz</td></tr></table>
", + "bbox": [ + 532, + 804, + 895, + 862 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2: Real-world IMU datasets for MobilePoser Evaluation.", + "bbox": [ + 514, + 863, + 913, + 875 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "UIST'24,October 13-16,2024,Pittsburgh,PA,USA", + "bbox": [ + 83, + 75, + 326, + 87 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Xu, et al.", + "bbox": [ + 867, + 75, + 911, + 85 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ede03b447a6f857c0266e6eff4f01b390fd362031a467d8cdaa8a83ca3d76bf3.jpg", + "image_caption": [ + "Figure 5: Comparison of MobilePoser's Full-Body Pose Estimation Error across different Evaluation Protocols on the DIP-IMU, IMUPoser and TotalCapture dataset respectively." + ], + "image_footnote": [], + "bbox": [ + 107, + 104, + 455, + 287 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2.2 Evaluation Protocol. We outline three evaluation protocols for training and fine-tuning to evaluate MobilePoser's efficacy across different data sources and noise profiles.", + "bbox": [ + 81, + 369, + 482, + 411 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Base Model: We train our model on the synthetic data generated on the AMASS dataset.", + "- Finetune DIP-IMU: Like prior work, we train on AMASS and then fine-tune on 8 DIP-IMU participants. The 2 holdout participants are used for testing the Finetune DIP-IMU model on the DIP-IMU dataset.", + "- Finetune IMUPoser: We train on AMASS and fine-tune on the first 8 IMUPoser participants. The 2 holdout participants are used for testing the Finetune IMUPoser model on the IMUPoser dataset." + ], + "bbox": [ + 109, + 414, + 482, + 551 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.2.3 Accuracy across Datasets. Figure 5 shows our full-body pose estimation accuracy for all three protocols across the three datasets listed in Section 5.1. Averaged across all three datasets, the MPJVE for the Base Model, Finetune DIP-IMU and Finetune IMUPoser protocols are 11.89, 11.73 and $11.33\\mathrm{cm}$ respectively. It is interesting to note that the addition of commercial-grade IMU data (Finetune DIP-IMU) only improves accuracy by $1.3\\%$ over the base model, while the addition of noisy IMU data from consumer devices (Finetune IMUPoser) results in a bigger improvement of $4.7\\%$ .", + "bbox": [ + 81, + 561, + 482, + 686 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "5.2.4 Accuracy across Activities. We further analyze results on different activities on the IMUPoser dataset, as it provides activity label meta-data. MobilePoser's accuracy generalizes across most everyday activity contexts: the error (MPJVE) for locomotion is 8.2 cm (walking 7.6 cm, jogging 8.8 cm), exercises is 10 cm (kicking: 7.5 cm, jumping jacks: 11.1 cm, boxing: 11.5 cm), sitting is 11.5 cm and freestyle motions such as tennis and basketball are 9.1 cm and 11.7 cm respectively. The accuracy degrades for postures with the user lying/facing down, e.g. push-ups have higher error of 16.1 cm.", + "5.2.5 Comparison with prior work. To aid in direct comparison with prior work [14, 28, 48, 49], we now make use of the Finetune DIP-IMU evaluation protocol, that is training a base model on the synthetic IMU data from AMASS and fine-tuning it on the 8 participants from DIP-IMU dataset. 
Tables 1 and 3 offer a quantitative" + ], + "bbox": [ + 81, + 694, + 482, + 896 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/f1aa1fb87ee39eb83a3a850d85c7930c38f8b9d2834c06689b08d09eb640adb7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td>System</td><td># Inst. Joints</td><td>MPJRE</td><td>MPJVE</td><td>Jitter</td></tr><tr><td>DIP</td><td>6</td><td>17.2°</td><td>11.2</td><td>3.62</td></tr><tr><td>TransPose</td><td>6</td><td>12.8°</td><td>7.4</td><td>0.95</td></tr><tr><td>PIP</td><td>6</td><td>12.1°</td><td>6.5</td><td>0.20</td></tr><tr><td>IMUPoser</td><td>1-3</td><td>25.6°</td><td>15.4</td><td>1.30</td></tr><tr><td>MobilePoser</td><td>1-3</td><td>23.7°</td><td>12.6</td><td>0.55</td></tr></table>
", + "bbox": [ + 540, + 102, + 887, + 189 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3: Comparison with key prior work on the TotalCapture dataset.", + "bbox": [ + 514, + 190, + 915, + 215 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "comparison against key prior work, evaluated on the DIP-IMU and TotalCapture, dataset respectively. Given that our system targets a very sparse configuration of IMUs (1-3), it is unsurprising that we perform worse than systems utilizing 6 IMUs, strategically placed around the body. On the DIP-IMU and TotalCapture dataset, compared to IMUPoser, which considers the same device-location combinations, we perform significantly better displaying a $12.4\\%$ and $18.2\\%$ decrease in vertex error respectively.", + "bbox": [ + 511, + 257, + 913, + 367 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "On the IMUPoser dataset, Figure 7 (A) provides a detailed breakdown of accuracy for different on-body device locations. Averaging across the 1, 2 and 3 device conditions, MobilePoser outperforms IMUPoser by $24.1\\%$ , $14.2\\%$ and $8.7\\%$ respectively. Furthermore, Figure 7 (B) provides an accuracy breakdown for the instrumented and non-instrumented joints in comparison with IMUPoser. If a limb has an IMU placed on any part, we consider all the joints pertaining to it as instrumented joints, while the rest are marked as non-instrumented. MobilePoser is $18.1\\%$ and $17.4\\%$ better than IMUPoser for predicting instrumented and non-instrumented joints respectively. This can be seen in Figure 6 which depicts a visual comparison of our pose estimation with IMUPoser.", + "bbox": [ + 511, + 367, + 913, + 534 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/41dd9e935a6e838257beb11b887e0433dc3580897df48f9c7643911446cbaaf3.jpg", + "image_caption": [ + "Figure 6: Qualitative comparisons between our method and IMUPoser on the DIP-IMU and IMUPoser dataset." + ], + "image_footnote": [], + "bbox": [ + 545, + 551, + 883, + 851 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices", + "bbox": [ + 83, + 75, + 648, + 85 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "UIST'24,October 13-16,2024,Pittsburgh,PA,USA", + "bbox": [ + 669, + 75, + 913, + 87 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/24a6ce53daf981e05ba77dd84c8966fdf8230686765f953522e471c00433664e.jpg", + "image_caption": [ + "Figure 7: MPJVE comparison between IMUPoser and MobilePoser (our system) on the IMUPoser Dataset for: (A) Different on-body device combinations (B) Instrumented vs Non Instrumented joints." + ], + "image_footnote": [], + "bbox": [ + 86, + 108, + 911, + 381 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.3 Global Translation Estimation", + "text_level": 1, + "bbox": [ + 83, + 446, + 375, + 459 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "5.3.1 Evaluation Protocol. We evaluate our Global Translation Estimation module on the TotalCapture and IMUPoser datasets, as DIP-IMU lacks translation data. Like prior work [48, 49], we use the Finetune DIP-IMU protocol (Section 5.2.2), that is we train on AMASS and fine-tune on 8 participants of DIP-IMU to track the Root Translation Error (Euclidean norm of the cumulative distance errors within 1 second).", + "5.3.2 Accuracy across Datasets and Body Regions. 
On the TotalCapture and IMUPoser datasets, our mean root translation error across all device combinations is 27.55 and $17.63\mathrm{cm}$ respectively. Interestingly, for both the IMUPoser and TotalCapture datasets, we observe only a slight decrease in error when increasing the number of devices from one to two $(6.1\%)$ and no significant improvement $(4.0\%)$ when increasing from two devices to three. Analysing the error across different body regions for the single device scenario"
    ],
    "bbox": [
      81,
      464,
      482,
      681
    ],
    "page_idx": 7
  },
  {
    "type": "image",
    "img_path": "images/36e1570002c8570b361bdc0aa62d67edbc98118850cfb1dbe5dae9fc5bd7f18b.jpg",
    "image_caption": [
      "Figure 8: (A) Comparison of cumulative translation error for different instrumented joints on the IMUPoser and TotalCapture datasets. (B) Evaluation of cumulative distance errors with respect to time."
    ],
    "image_footnote": [],
    "bbox": [
      81,
      704,
      480,
      825
    ],
    "page_idx": 7
  },
  {
    "type": "text",
    "text": "(Figure 8 (A)), we see that a device in the pocket has a much lower error $(14.8\mathrm{cm})$ compared to that on the wrist $(25.7\mathrm{cm})$ or the head $(29.7\mathrm{cm})$ . This can be attributed to the legs capturing most of the locomotion data during translation, resulting in marginal gains from sensors on the upper body. Figure 8 (B) shows the cumulative distance error over time.",
    "bbox": [
      511,
      446,
      915,
      529
    ],
    "page_idx": 7
  },
  {
    "type": "text",
    "text": "5.3.3 Ablation Study. We perform ablation studies to understand the impact of key components in our system and their effects on performance. At the core of our system lies a subtle yet powerful concept: higher-order digitization (e.g., body pose) improves lower-order digitizations (e.g., steps). To quantify this idea, we run an ablation study of our translation estimation technique using both IMU data and the corresponding full-body pose inferred from it versus using only IMU data. Figure 9 summarizes our results. Our IMU-only, direct regression has an error of $21.4\mathrm{cm}$ across both",
    "bbox": [
      511,
      537,
      915,
      662
    ],
    "page_idx": 7
  },
  {
    "type": "image",
    "img_path": "images/7b97abca80b72e72c22ffcf61d1df2388fd2a3e720e8c40f16839719378dbc45.jpg",
    "image_caption": [
      "Figure 9: Benefits of using high-order digitization (i.e., IMU inferred poses) for estimating global translation."
    ],
    "image_footnote": [],
    "bbox": [
      553,
      681,
      875,
      851
    ],
    "page_idx": 7
  },
  {
    "type": "header",
    "text": "UIST '24, October 13-16, 2024, Pittsburgh, PA, USA",
    "bbox": [
      84,
      75,
      326,
      85
    ],
    "page_idx": 7
  },
  {
    "type": "header",
    "text": "Xu, et al.",
    "bbox": [
      867,
      75,
      911,
      85
    ],
    "page_idx": 7
  },
  {
    "type": "image",
    "img_path": "images/9a40e3573eaab617538725d13e6c909e5c242d39f4c53f843dea1f2131d3bbad.jpg",
    "image_caption": [
      "Figure 10: Example indoor navigation application where MobilePoser digitizes multiple users within an office space." 
+ ], + "image_footnote": [], + "bbox": [ + 84, + 104, + 282, + 325 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/468b8d082bb0ee2c8c8e2bf1858a5b7eddc0b0a61d02d612a1e9d13acd5a000c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 282, + 104, + 480, + 325 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "datasets, while our integrated $(\\mathrm{IMU} + \\mathrm{IMU}$ inferred pose) approach decreases error by $29.4\\%$ to $15.1~\\mathrm{cm}$ .", + "bbox": [ + 81, + 393, + 480, + 421 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Building on the multi-stage architecture, we further evaluate the impact of two additional components: jerk loss and physics refinement. These elements were designed to enhance motion smoothness and physical plausibility. For the IMUPoser dataset, the jerk loss reduces jitter by $23.9\\%$ and translation error by $3.33\\%$ , but increases mean pose error by $0.05\\%$ . Further, the physics-aware refinement reduces jitter by $29.7\\%$ and translation error by $0.4\\%$ , but increases the mean pose error by $0.7\\%$ . The negligible increase in mean pose error is expected, as it may occasionally over-smooth the motion. This phenomenon is also seen in the PIP [48]. We believe that significant improvements in jitter and translation far outweigh the minimal increase in pose error, resulting in a more realistic motion.", + "bbox": [ + 81, + 422, + 482, + 588 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5.3.4 Comparison with prior work. To the best of our knowledge, no other works have explored both full-body pose and translation from such a sparse set of commodity IMUs. IMUPoser [28], which also targets consumer devices, does not estimate global translation. On the TotalCapture dataset, TransPose (6 IMUs) has a translation error of $12.8\\mathrm{cm}$ while that of MobilePoser is $19.9\\mathrm{cm}$ when a single IMU device is placed in the pocket. Unsurprisingly, a commercial grade, 6 IMU-based system has higher accuracy due to their waist and knee mounted sensors, which capture larger ranges of locomotion compared to devices carried in the pocket.", + "bbox": [ + 81, + 595, + 482, + 734 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 EXAMPLE USES", + "text_level": 1, + "bbox": [ + 83, + 748, + 246, + 762 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "MobilePoser enables full-body pose estimation with global motion tracking using devices that users already own, opening up a wide range of novel applications. 
This section showcases three proof-of-concept applications in indoor navigation, gaming, and healthcare to illustrate MobilePoser's unique capabilities and potential impact.", + "bbox": [ + 81, + 766, + 482, + 835 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6.1 Indoor Localization and Navigation", + "text_level": 1, + "bbox": [ + 81, + 849, + 415, + 864 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To demonstrate MobilePoser's potential in this domain, we scan an office space using the PolyCam [34] LiDAR scanner app with", + "bbox": [ + 81, + 867, + 480, + 896 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/8bf1542bdaa768fc5e2a80f6613166a3fb2954882e1c0b9e4d765ccec6f2611a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 104, + 728, + 255 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/9ed4af4fa541a6111eb81d829e2a5dee85f3efe408f46523f63cfff3248702bf.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 728, + 104, + 911, + 255 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/2af18b4f56ea893a7d4db8d091c0a22c68ebfee9c664430ea05299bdf5efa3c4.jpg", + "image_caption": [ + "Figure 11: In this table tennis game users can move around the table freely and use their wrist-instrumented hand to control their racket." + ], + "image_footnote": [], + "bbox": [ + 517, + 256, + 728, + 407 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/b463e6ba1e085e714896d2c0eb817f52b2064dd728a766208e5c2a436692c40d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 728, + 256, + 911, + 407 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "an Apple iPhone 15 Pro. As shown in Figure 10, multiple users walk through the virtual office space, with their interactions and movements seamlessly digitized and represented in real-time. Here, one user has a phone in their pocket and a watch on their wrist, while the other two only have a phone in their pocket. By leveraging the IMUs in these consumer devices, MobilePoser enables accurate indoor navigation and localization without the need for additional infrastructure or specialized hardware. This opens up exciting possibilities for applications such as indoor way finding, context-aware virtual assistants, and immersive virtual tours.", + "bbox": [ + 511, + 487, + 913, + 626 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6.2 Mobile Gaming Experiences", + "text_level": 1, + "bbox": [ + 513, + 638, + 785, + 655 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To showcase this potential, we developed a virtual table tennis game (Figure 11) that allows users to play remotely with others, similar to how Nintendo games are played in front of a TV. Each player has a phone in their pocket and a watch on the dominant (left) hand, which is controlling the racket. Players can freely move within their local space to control their avatars, adding a new level of physical interaction to the gaming experience. 
MobilePoser's ability to track full-body movements using everyday devices eliminates the need for specialized controllers, making immersive gaming experiences more accessible to a wider audience.", + "bbox": [ + 511, + 657, + 913, + 795 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6.3 Fitness and Wellness", + "text_level": 1, + "bbox": [ + 513, + 808, + 728, + 821 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "MobilePoser has the potential to revolutionize fitness tracking and rehabilitation by providing accurate, real-time feedback on a user's movements and poses without the need for external sensors or camera setups. This enables users to monitor their exercise form, track progress, and receive personalized guidance using the devices", + "bbox": [ + 511, + 825, + 913, + 896 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices", + "bbox": [ + 83, + 75, + 648, + 85 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "UIST'24,October 13-16,2024,Pittsburgh,PA,USA", + "bbox": [ + 669, + 75, + 913, + 87 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/d8de858657724951663c9cad16d40d80050174bb0830339947de87e3c6a36abc.jpg", + "image_caption": [ + "Figure 12: MobilePoser's full-body pose and locomotion can be used to automatically detect and count exercise repetitions, better estimate calories and monitor form." + ], + "image_footnote": [], + "bbox": [ + 84, + 103, + 478, + 325 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "they already own. In this example (Figure 12), a user performs a workout routine while MobilePoser captures the session using the IMU data from the smartphone in the user's pocket. This not only allows the user to review their performance and track progress over time but also enables remote monitoring by fitness instructors or physical therapists. Moreover, MobilePoser's ability to track full-body movements facilitates interactive rehabilitation regimens [4] and other passive health sensing applications such as gait analysis [30] or hyperactivity detection [6], among others.", + "bbox": [ + 81, + 404, + 482, + 530 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "7 OPEN SOURCE", + "text_level": 1, + "bbox": [ + 83, + 542, + 238, + 555 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "To enable other researchers and practitioners to build upon our work, we release our pre-trained models, data pre-processing scripts, and model training code as open-source software at: https://github. com/SPICExLAB/MobilePoser. By making our work fully reproducible and extensible, we hope to accelerate research and development in the field of mobile motion capture using everyday devices.", + "bbox": [ + 81, + 560, + 482, + 643 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "8 LIMITATIONS AND FUTURE WORK", + "text_level": 1, + "bbox": [ + 81, + 655, + 413, + 669 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "While MobilePoser demonstrates promising results in estimating full-body pose and translation using minimal instrumentation, there are several limitations and opportunities for future work. First, as a purely inertial-based technique, MobilePoser's translation estimation is still susceptible to drift, particularly when devices deviate from their calibrated positions. 
This can occur when users wear loose clothing, causing the phone in the pocket to move around and resulting in orientation changes. To address this issue, future work could explore re-calibration techniques based on stationary poses or leverage additional sensory information, such as GPS, UWB or visual odometry, to correct for drift.",
    "bbox": [
      81,
      674,
      482,
      825
    ],
    "page_idx": 9
  },
  {
    "type": "text",
    "text": "Second, akin to prior work, our evaluation is limited to lab-collected datasets. All the test datasets (DIP, TotalCapture, IMUPoser) were collected in lab settings due to the need for an accurate external ground truth motion capture system. Although we empirically demonstrate that MobilePoser works in real-world",
    "bbox": [
      81,
      825,
      482,
      896
    ],
    "page_idx": 9
  },
  {
    "type": "text",
    "text": "settings (as seen in the accompanying video), we acknowledge the need for future datasets captured in-the-wild.",
    "bbox": [
      513,
      107,
      911,
      133
    ],
    "page_idx": 9
  },
  {
    "type": "text",
    "text": "Another limitation of MobilePoser, much like other prior works [14, 28, 48, 49], is the need for a calibration step. Currently, users first stand in a T-pose, which aligns the IMU data with the training data based on the SMPL kinematic model. While this calibration process is acceptable for some use cases, such as gaming, it may be less desirable for applications that demand seamless interactions, like indoor navigation. Future work could investigate more natural and unobtrusive calibration procedures, such as detecting common poses like standing with arms by the side using UWB, similar to SmartPoser [11].",
    "bbox": [
      511,
      133,
      913,
      272
    ],
    "page_idx": 9
  },
  {
    "type": "text",
    "text": "In conclusion, while MobilePoser presents a significant step forward in enabling full-body pose and translation estimation using everyday devices, there remain several avenues for future research to extend the capabilities of this approach.",
    "bbox": [
      513,
      273,
      911,
      329
    ],
    "page_idx": 9
  },
  {
    "type": "text",
    "text": "9 CONCLUSION",
    "text_level": 1,
    "bbox": [
      514,
      342,
      663,
      354
    ],
    "page_idx": 9
  },
  {
    "type": "text",
    "text": "In this paper, we present MobilePoser, a real-time, on-device system for estimating full-body pose and translation using IMUs in consumer mobile devices (phones, watches, earbuds). By leveraging a multi-stage approach that combines data-driven learning and physics-based optimization, MobilePoser achieves state-of-the-art accuracy while remaining lightweight and efficient. Our extensive evaluation on public datasets demonstrates clear improvements over prior work, both in terms of full-body pose estimation accuracy and enabling novel global translation estimation. Furthermore, we showcase the potential of MobilePoser through a series of proof-of-concept applications in gaming, fitness, and indoor navigation, highlighting its ability to enable new and immersive experiences using the devices people already own.",
    "bbox": [
      511,
      359,
      913,
      541
    ],
    "page_idx": 9
  },
  {
    "type": "text",
    "text": "ACKNOWLEDGMENTS",
    "text_level": 1,
    "bbox": [
      514,
      554,
      712,
      566
    ],
    "page_idx": 9
  },
  {
    "type": "text",
    "text": "We thank Jianru Ding from the University of Chicago and Zeya Chen from the Institute of Design, Illinois Institute of Technology for helping film the video. 
Vasco Xu's and Henry Hoffmann's work on this project is supported by NSF (CCF-1823032 and CNS-1956180).", + "bbox": [ + 511, + 571, + 913, + 642 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 516, + 655, + 633, + 669 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] [n. d]. PlayStation VR. https://www.playstation.com/en-us/explore/playstationvr/.", + "[2] 2023. HTC Vive. https://www.vive.com.", + "[3] Karan Ahuja. 2024. Practical and Rich User Digitization. arXiv:2403.00153 [cs.HC] https://arxiv.org/abs/2403.00153", + "[4] Karan Ahuja, Sven Mayer, Mayank Goel, and Chris Harrison. 2021. Pose-on-the-go: Approximating user pose with smartphone sensor fusion and inverse kinematics. In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems. 1-12.", + "[5] Karan Ahuja, Vivian Shen, Cathy Mengying Fang, Nathan Riopelle, Andy Kong, and Chris Harrison. 2022. Controllerpose: inside-out body capture with VR controller cameras. In Proceedings of the 2022 CHI Conference on Human Factors in Computing Systems. 1-13.", + "[6] Riku Arakawa, Karan Ahuja, Kristie Mak, Gwendolyn Thompson, Sam Shaaban, Oliver Lindhiem, and Mayank Goel. 2023. LemurDx: Using Unconstrained Passive Sensing for an Objective Measurement of Hyperactivity in Children with no Parent Input. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 7, 2 (2023), 1-23.", + "[7] Riku Arakawa, Bing Zhou, Gurunandan Krishnan, Mayank Goel, and Shree K Nayar. 2023. MI-Poser: Human Body Pose Tracking Using Magnetic and Inertial Sensor Fusion with Metal Interference Mitigation. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 7, 3 (2023), 1-24." + ], + "bbox": [ + 521, + 672, + 913, + 893 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "UIST '24, October 13-16, 2024, Pittsburgh, PA, USA", + "bbox": [ + 84, + 75, + 326, + 85 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Xu, et al.", + "bbox": [ + 867, + 75, + 911, + 85 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[8] Rayan Armani, Changlin Qian, Jiaxi Jiang, and Christian Holz. 2024. Ultra Inertial Poser: Scalable Motion Capture and Tracking from Sparse Inertial Sensors and Ultra-Wideband Ranging. In ACM SIGGRAPH 2024 Conference Papers. 1-11.", + "[9] Federica Bogo, Angjoo Kanazawa, Christoph Lassner, Peter Gehler, Javier Romero, and Michael J Black. 2016. Keep it SMPL: Automatic estimation of 3D human pose and shape from a single image. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part V 14. Springer, 561-578.", + "[10] Nathan Devrio and Chris Harrison. 2022. discoBand: Multiview Depth-Sensing Smartwatch Strap for Hand, Body and Environment Tracking. In Proceedings of the 35th Annual ACM Symposium on User Interface Software and Technology. 1-13.", + "[11] Nathan DeVrio, Vimal Mollyn, and Chris Harrison. 2023. SmartPoser: Arm Pose Estimation with a Smartphone and Smartwatch Using UWB and IMU Data. In Proceedings of the 36th Annual ACM Symposium on User Interface Software and Technology. 1-11.", + "[12] Roy Featherstone. 2014. Rigid body dynamics algorithms. Springer.", + "[13] Shubham Goel, Georgios Pavlakos, Jathushan Rajasegaran, Angjoo Kanazawa, and Jitendra Malik. 2023. Humans in 4d: Reconstructing and tracking humans with transformers. 
In Proceedings of the IEEE/CVF International Conference on Computer Vision. 14783-14794.", + "[14] Yinghao Huang, Manuel Kaufmann, Emre Aksan, Michael J Black, Otmar Hilliges, and Gerard Pons-Moll. 2018. Deep inertial pose: Learning to reconstruct human pose from sparse inertial measurements in real time. ACM Transactions on Graphics (TOG) 37, 6 (2018), 1-15.", + "[15] Fan Jiang, Xubo Yang, and Lele Feng. 2016. Real-time full-body motion reconstruction and recognition for off-the-shelf VR devices. In Proceedings of the 15th ACM SIGGRAPH Conference on Virtual-Reality Continuum and Its Applications in Industry-Volume 1, 309–318.", + "[16] Jiaxi Jiang, Paul Streli, Huajian Qiu, Andreas Fender, Larissa Laich, Patrick Snape, and Christian Holz. 2022. Avatarposer: Articulated full-body pose tracking from sparse motion sensing. In European Conference on Computer Vision. Springer, 443-460.", + "[17] Yifeng Jiang, Yuting Ye, Deepak Gopinath, Jungdam Won, Alexander W Winkler, and C Karen Liu. 2022. Transformer Inertial Poser: Real-time human motion reconstruction from sparse IMUs with simultaneous terrain generation. In SIGGRAPH Asia 2022 Conference Papers. 1-9.", + "[18] Haojian Jin, Zhijian Yang, Swarun Kumar, and Jason I Hong. 2018. Towards wearable everyday body-frame tracking using passive RFIDs. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 1, 4 (2018), 1-23.", + "[19] Daehwa Kim and Chris Harrison. 2022. Etherpose: Continuous hand pose tracking with wrist-worn antenna impedance characteristic sensing. In Proceedings of the 35th Annual ACM Symposium on User Interface Software and Technology. 1-12.", + "[20] David Kim, Otmar Hilliges, Shahram Izadi, Alex D Butler, Jiawen Chen, Jason Oikonomidis, and Patrick Olivier. 2012. Digits: freehand 3D interactions anywhere using a wrist-worn gloveless sensor. In Proceedings of the 25th annual ACM symposium on User interface software and technology. 167-176.", + "[21] Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014).", + "[22] Alexander Kyu, Hongyu Mao, Junyi Zhu, Mayank Goel, and Karan Ahuja. 2024. EITPose: Wearable and Practical Electrical Impedance Tomography for Continuous Hand Pose Estimation. In Proceedings of the CHI Conference on Human Factors in Computing Systems. 1-10.", + "[23] Jiye Lee and Hanbyul Joo. 2024. Mocap Everyone Everywhere: Lightweight Motion Capture With Smartwatches and a Head-Mounted Camera. arXiv preprint arXiv:2401.00847 (2024).", + "[24] Yilin Liu, Shijia Zhang, and Mahanth Gowda. 2021. NeuroPose: 3D hand pose tracking using EMG wearables. In Proceedings of the Web Conference 2021. 1471-1482.", + "[25] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J. Black. 2015. SMPL: A Skinned Multi-Person Linear Model. ACM Trans. Graphics (Proc. SIGGRAPH Asia) 34, 6 (Oct. 2015), 248:1-248:16.", + "[26] Naureen Mahmood, Nima Ghorbani, Nikolaus F Troje, Gerard Pons-Moll, and Michael J Black. 2019. AMASS: Archive of motion capture as surface shapes. In Proceedings of the IEEE/CVF international conference on computer vision. 5442-5451.", + "[27] Microsoft Corporation. [n.d.]. Microsoft Kinect.", + "[28] Vimal Mollyn, Riku Arakawa, Mayank Goel, Chris Harrison, and Karan Ahuja. 2023. IMUPoser: Full-Body Pose Estimation using IMUs in Phones, Watches, and Earbuds. In Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems. 1-12.", + "[29] NaturalPoint, Inc. [n.d.]. OptiTrack. 
https://www.optitrack.com.", + "[30] Shu Nishiguchi, Minoru Yamada, Koutatsu Nagai, Shuhei Mori, Yuu Kajiwara, Takuya Sonoda, Kazuya Yoshimura, Hiroyuki Yoshitomi, Hiromu Ito, Kazuya Okamoto, et al. 2012. Reliability and validity of gait analysis by android-based smartphone. Telemedicine and e-Health 18, 4 (2012), 292–296." + ], + "bbox": [ + 86, + 108, + 482, + 883 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[31] Northern Digital Inc. 2020. travSTAR. https://www.ndigital.com/msci/products/drivebay-trakstar.", + "[32] Mathias Parger, Joerg H Mueller, Dieter Schmalstieg, and Markus Steinberger. 2018. Human upper-body inverse kinematics for increased embodiment in consumer-grade virtual reality. In Proceedings of the 24th ACM symposium on virtual reality software and technology. 1-10.", + "[33] Polhemus. 2020. Polhemus Motion Capture System. https://polhemus.com/.", + "[34] PolyCam. [n.d.]. PolyCam. https://poly.cam/.", + "[35] Jose Luis Ponton, Haoran Yun, Andreas Aristidou, Carlos Andujar, and Nuria Pelechano. 2023. SparsePoser: Real-time Full-body Motion Reconstruction from Sparse Data. ACM Transactions on Graphics 43, 1 (2023), 1-14.", + "[36] Jathushan Rajasegaran, Georgios Pavlakos, Angjoo Kanazawa, and Jitendra Malik. 2021. Tracking people with 3D representations. arXiv preprint arXiv:2111.07868 (2021).", + "[37] Nirupam Roy, He Wang, and Romit Roy Choudhury. 2014. I am a smartphone and i can tell my user's walking direction. In Proceedings of the 12th annual international conference on Mobile systems, applications, and services. 329-342.", + "[38] Takaki Shiratori, Hyun Soo Park, Leonid Sigal, Yaser Sheikh, and Jessica K Hodgins. 2011. Motion capture from body-mounted cameras. In ACM SIGGRAPH 2011 papers. 1-10.", + "[39] Ivan E Sutherland. 1968. A head-mounted three dimensional display. In Proceedings of the December 9-11, 1968, fall joint computer conference, part I. 757-764.", + "[40] Matthew Trumble, Andrew Gilbert, Charles Malleson, Adrian Hilton, and John Collomosse. 2017. Total capture: 3d human pose estimation fusing video and inertial sensors. In Proceedings of 28th British Machine Vision Conference. 1-13.", + "[41] Vicon Motion Systems Ltd. [n.d.]. Vicon. https://www.vicon.com.", + "[42] Daniel Vlasic, Rolf Adelsberger, Giovanni Vannucci, John Barnwell, Markus Gross, Wojciech Matusik, and Jovan Popovic. 2007. Practical motion capture in everyday surroundings. ACM transactions on graphics (TOG) 26, 3 (2007), 35-es.", + "[43] Timo Von Marcard, Bodo Rosenhahn, Michael J Black, and Gerard Pons-Moll. 2017. Sparse inertial poser: Automatic 3d human pose estimation from sparse imus. In Computer graphics forum, Vol. 36. Wiley Online Library, 349-360.", + "[44] Erwin Wu, Ye Yuan, Hui-Shyong Yeo, Aaron Quigley, Hideki Koike, and Kris M Kitani. 2020. Back-hand-posed: 3d hand pose estimation for a wrist-worn camera via dorsum deformation network. In Proceedings of the 33rd Annual ACM Symposium on User Interface Software and Technology. 1147–1160.", + "[45] Xsens Technologies B.V. [n.d.]. Xsens IMU Systems. https://www.xsens.com. Accessed: 2024-03-07.", + "[46] Hang Yan, Qi Shan, and Yasutaka Furukawa. 2018. RIDI: Robust IMU double integration. In Proceedings of the European conference on computer vision (ECCV), 621-636.", + "[47] Xinyu Yi, Yuxiao Zhou, Marc Habermann, Vladislav Golyanik, Shaohua Pan, Christian Theobalt, and Feng Xu. 2023. EgoLocate: Real-time Motion Capture, Localization, and Mapping with Sparse Body-mounted Sensors. 
arXiv preprint arXiv:2305.01599 (2023).", + "[48] Xinyu Yi, Yuxiao Zhou, Marc Habermann, Soshi Shimada, Vladislav Golyanik, Christian Theobalt, and Feng Xu. 2022. Physical inertial poser (pip): Physics-aware real-time human motion tracking from sparse inertial sensors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 13167-13178.", + "[49] Xinyu Yi, Yuxiao Zhou, and Feng Xu. 2021. Transpose: Real-time 3d human translation and pose estimation with six inertial sensors. ACM Transactions on Graphics (TOG) 40, 4 (2021), 1-13.", + "[50] Yang Zhang, Chouchang Yang, Scott E Hudson, Chris Harrison, and Alanson Sample. 2018. Wall++ room-scale interactive and context-aware sensing. In Proceedings of the 2018 chi conference on human factors in computing systems. 1-15.", + "[51] Mingmin Zhao, Tianhong Li, Mohammad Abu Alsheikh, Yonglong Tian, Hang Zhao, Antonio Torralba, and Dina Katabi. 2018. Through-wall human pose estimation using radio signals. In Proceedings of the IEEE conference on computer vision and pattern recognition. 7356-7365.", + "[52] Li'an Zhuo, Jian Cao, Qi Wang, Bang Zhang, and Liefeng Bo. 2023. Towards Stable Human Pose Estimation via Cross-View Fusion and Foot Stabilization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 650-659." + ], + "bbox": [ + 516, + 108, + 913, + 763 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices", + "bbox": [ + 84, + 75, + 648, + 85 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "UIST '24, October 13-16, 2024, Pittsburgh, PA, USA", + "bbox": [ + 671, + 75, + 913, + 87 + ], + "page_idx": 10 + } +] \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12492/7b5fffe0-9106-451e-9d83-f8c53a7c05c0_model.json b/data/2025/2504_12xxx/2504.12492/7b5fffe0-9106-451e-9d83-f8c53a7c05c0_model.json new file mode 100644 index 0000000000000000000000000000000000000000..a4bb5b6baf5106514f1946ec9d3b4decf9cb7c09 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/7b5fffe0-9106-451e-9d83-f8c53a7c05c0_model.json @@ -0,0 +1,3082 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.264, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.12492v1 [cs.HC] 16 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.102, + 0.885, + 0.15 + ], + "angle": 0, + "content": "MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices" + }, + { + "type": "text", + "bbox": [ + 0.259, + 0.158, + 0.338, + 0.173 + ], + "angle": 0, + "content": "Vasco Xu" + }, + { + "type": "text", + "bbox": [ + 0.225, + 0.175, + 0.373, + 0.189 + ], + "angle": 0, + "content": "University of Chicago" + }, + { + "type": "text", + "bbox": [ + 0.252, + 0.19, + 0.346, + 0.205 + ], + "angle": 0, + "content": "Chicago, USA" + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.206, + 0.378, + 0.22 + ], + "angle": 0, + "content": "vascoxu@uchicago.edu" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.232, + 0.368, + 0.248 + ], + "angle": 0, + "content": "Henry Hoffmann" + }, + { + "type": "text", + "bbox": [ + 0.224, + 0.249, + 0.372, + 0.263 + ], + "angle": 0, + "content": "University of Chicago" + }, + { + "type": "text", + "bbox": [ + 0.251, + 0.264, + 0.346, + 0.278 + ], + "angle": 0, + "content": "Chicago, USA" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.279, + 0.408, + 0.293 + ], + "angle": 0, + "content": 
"hankhoffmann@cs.uchicago.edu" + }, + { + "type": "text", + "bbox": [ + 0.642, + 0.158, + 0.76, + 0.174 + ], + "angle": 0, + "content": "Chenfeng Gao" + }, + { + "type": "text", + "bbox": [ + 0.617, + 0.175, + 0.786, + 0.189 + ], + "angle": 0, + "content": "Northwestern University" + }, + { + "type": "text", + "bbox": [ + 0.65, + 0.19, + 0.752, + 0.203 + ], + "angle": 0, + "content": "Evanston, USA" + }, + { + "type": "text", + "bbox": [ + 0.57, + 0.205, + 0.833, + 0.22 + ], + "angle": 0, + "content": "chenfenggao2029@u.northwestern.edu" + }, + { + "type": "text", + "bbox": [ + 0.649, + 0.232, + 0.753, + 0.248 + ], + "angle": 0, + "content": "Karan Ahuja" + }, + { + "type": "text", + "bbox": [ + 0.616, + 0.248, + 0.785, + 0.263 + ], + "angle": 0, + "content": "Northwestern University" + }, + { + "type": "text", + "bbox": [ + 0.649, + 0.264, + 0.752, + 0.277 + ], + "angle": 0, + "content": "Evanston, USA" + }, + { + "type": "text", + "bbox": [ + 0.61, + 0.278, + 0.791, + 0.293 + ], + "angle": 0, + "content": "kahuja@northwestern.edu" + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.31, + 0.497, + 0.522 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.31, + 0.842, + 0.522 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.535, + 0.914, + 0.565 + ], + "angle": 0, + "content": "Figure 1: MobilePoser uses any subset of consumer mobile devices (phones, watches, earbuds) available to estimate full-body pose and global translation." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.569, + 0.185, + 0.583 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.588, + 0.483, + 0.783 + ], + "angle": 0, + "content": "There has been a continued trend towards minimizing instrumentation for full-body motion capture, going from specialized rooms and equipment, to arrays of worn sensors and recently sparse inertial pose capture methods. However, as these techniques migrate towards lower-fidelity IMUs on ubiquitous commodity devices, like phones, watches, and earbuds, challenges arise including compromised online performance, temporal consistency, and loss of global translation due to sensor noise and drift. Addressing these challenges, we introduce MobilePoser, a real-time system for full-body pose and global translation estimation using any available subset of IMUs already present in these consumer devices. MobilePoser employs a multi-stage deep neural network for kinematic pose estimation followed by a physics-based motion optimizer, achieving state-of-the-art accuracy while remaining lightweight. We conclude" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.571, + 0.914, + 0.614 + ], + "angle": 0, + "content": "with a series of demonstrative applications to illustrate the unique potential of MobilePoser across a variety of fields, such as health and wellness, gaming, and indoor navigation to name a few." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.625, + 0.654, + 0.639 + ], + "angle": 0, + "content": "CCS CONCEPTS" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.643, + 0.916, + 0.672 + ], + "angle": 0, + "content": "- Human-centered computing \\(\\rightarrow\\) Ubiquitous and mobile computing." 
+ }, + { + "type": "title", + "bbox": [ + 0.515, + 0.683, + 0.622, + 0.697 + ], + "angle": 0, + "content": "KEYWORDS" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.701, + 0.914, + 0.716 + ], + "angle": 0, + "content": "Motion capture, sensors, inertial measurement units, mobile devices" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.721, + 0.663, + 0.733 + ], + "angle": 0, + "content": "ACM Reference Format:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.734, + 0.916, + 0.809 + ], + "angle": 0, + "content": "Vasco Xu, Chenfeng Gao, Henry Hoffmann, and Karan Ahuja. 2024. MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices. In The 37th Annual ACM Symposium on User Interface Software and Technology (UIST '24), October 13–16, 2024, Pittsburgh, PA, USA. ACM, New York, NY, USA, 11 pages. https://doi.org/10.1145/3654777.3676461" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.822, + 0.688, + 0.836 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.841, + 0.916, + 0.898 + ], + "angle": 0, + "content": "Full-body motion capture has numerous applications in gaming, fitness, and virtual and augmented reality (VR/AR), enabling immersive experiences and context-aware interactions. While vision-based approaches for 3D human pose estimation have shown great" + }, + { + "type": "page_footnote", + "bbox": [ + 0.082, + 0.801, + 0.483, + 0.853 + ], + "angle": 0, + "content": "Permission to make digital or hard copies of part or all of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for third-party components of this work must be honored. For all other uses, contact the owner/author(s)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.086, + 0.854, + 0.317, + 0.864 + ], + "angle": 0, + "content": "UIST '24, October 13-16, 2024, Pittsburgh, PA, USA" + }, + { + "type": "page_footnote", + "bbox": [ + 0.085, + 0.865, + 0.305, + 0.875 + ], + "angle": 0, + "content": "© 2024 Copyright held by the owner/author(s)." + }, + { + "type": "page_footnote", + "bbox": [ + 0.086, + 0.875, + 0.254, + 0.884 + ], + "angle": 0, + "content": "ACM ISBN 979-8-4007-0628-8/24/10" + }, + { + "type": "page_footnote", + "bbox": [ + 0.086, + 0.885, + 0.273, + 0.896 + ], + "angle": 0, + "content": "https://doi.org/10.1145/3654777.3676461" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.327, + 0.087 + ], + "angle": 0, + "content": "UIST '24, October 13-16, 2024, Pittsburgh, PA, USA" + }, + { + "type": "header", + "bbox": [ + 0.868, + 0.077, + 0.912, + 0.087 + ], + "angle": 0, + "content": "Xu, et al." 
+ }, + { + "type": "image", + "bbox": [ + 0.086, + 0.104, + 0.339, + 0.362 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.342, + 0.104, + 0.5, + 0.362 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.104, + 0.657, + 0.361 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.66, + 0.104, + 0.788, + 0.361 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.788, + 0.104, + 0.912, + 0.361 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.375, + 0.916, + 0.418 + ], + "angle": 0, + "content": "Figure 2: Real-time global pose estimation powered by MobilePoser: (A) Person with smartwatch (left wrist) waving their hands. (B) Person with smartwatch (left wrist) performing jumping jacks. (C) Person wearing a smartwatch (left wrist) and carrying a phone in their right pocket running." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.438, + 0.483, + 0.508 + ], + "angle": 0, + "content": "promise, they require subjects to be within the camera's field of view, limiting their practicability for mobile and on-the-go applications. In contrast, inertial measurement unit (IMU) based techniques offer an attractive alternative, enabling less intrusive and occlusion-free user digitization [3]." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.508, + 0.483, + 0.7 + ], + "angle": 0, + "content": "Commercial systems such as Xsens [45] use up to 17 special-purpose sensors to provide highly accurate pose estimations. However, such approaches are intrusive, making them undesirable for everyday use. Consequently, there has been a trend towards minimizing instrumentation. Sparse inertial pose capture methods, such as TransPose [49] and DIP [14], use 6 IMUs to achieve a balance between accuracy and practicality. Yet, these methods still require expensive and special-purpose IMUs attached to specific body joints. To enable full-body motion tracking without any external infrastructure, IMUPoser [28] leverages IMUs in devices we already carry around with us, namely smartphones, smartwatches, and earbuds. These commodity devices, however, use lower-fidelity IMUs, which compromises online performance, temporal consistency, and global translation estimation." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.702, + 0.483, + 0.854 + ], + "angle": 0, + "content": "In this work, we present MobilePoser, a real-time user digitization technique that tracks both poses and global movement (referred to as translation) using consumer devices (Figure 1) such as watches, phones and earbuds. To enable on-the-go motion tracking without any external infrastructure, we must address a set of unique challenges. First, the number of instrumented points is dynamically changing and sparse (at most three devices, with as few as one), making the problem highly under-constrained. Second, IMUs do not directly measure positional data, making global translation tracking non-trivial. Additionally, noise and drift from the low-cost IMUs found in commodity devices complicates pose and translation" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.438, + 0.913, + 0.465 + ], + "angle": 0, + "content": "estimation. Finally, such a system should operate directly on-device for real-time use, anywhere, anytime." 
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.466, + 0.915, + 0.729 + ], + "angle": 0, + "content": "MobilePoser tackles these challenges by employing a multi-stage approach. For pose estimation, it utilizes a deep neural network (DNN) to predict full-body pose from the available IMU data, followed by a physics-based optimization step to ensure spatiotemporal consistency and plausible kinematics. This greatly helps resolve ambiguous instrumented joint motion profiles, such as differentiating between waving (Figure 2 A) versus jumping jacks (Figure 2 B) from only a single smartwatch on the wrist. To aid in generalizability, the model is trained on a large dataset of synthesized IMU measurements generated from high-quality motion capture (MoCap) data. For global translation estimation, MobilePoser employs a hybrid approach that fuses predictions from a foot contact-based method and a DNN-based method that directly regresses the root joint velocity. This combination enables accurate and robust translation estimation, even in challenging scenarios where both feet are in motion together (Figure 2 C). Importantly, MobilePoser is optimized to run on-device, achieving real-time performance of 60 frames per second on a smartphone (iPhone 15 Pro), making it suitable for mobile applications." + }, + { + "type": "text", + "bbox": [ + 0.53, + 0.73, + 0.915, + 0.743 + ], + "angle": 0, + "content": "In summary, MobilePoser makes the following key contributions:" + }, + { + "type": "text", + "bbox": [ + 0.533, + 0.758, + 0.916, + 0.799 + ], + "angle": 0, + "content": "(1) It presents a novel framework for inertial translation estimation using consumer devices, enabling accurate tracking of global movement without specialized hardware." + }, + { + "type": "text", + "bbox": [ + 0.533, + 0.8, + 0.915, + 0.854 + ], + "angle": 0, + "content": "(2) It achieves state-of-the-art full-body pose estimation across various on-body configurations of commodity IMU devices, demonstrating robust performance with as few as one and up to three wearable devices." + }, + { + "type": "text", + "bbox": [ + 0.533, + 0.855, + 0.915, + 0.896 + ], + "angle": 0, + "content": "(3) It provides an open-source implementation that runs in real-time on edge devices, making it accessible and practical for widespread use." + }, + { + "type": "list", + "bbox": [ + 0.533, + 0.758, + 0.916, + 0.896 + ], + "angle": 0, + "content": null + }, + { + "type": "page_footnote", + "bbox": [ + 0.085, + 0.884, + 0.429, + 0.896 + ], + "angle": 0, + "content": "1Note, we count the left and right earbuds as a unified single IMU stream" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.65, + 0.087 + ], + "angle": 0, + "content": "MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices" + }, + { + "type": "header", + "bbox": [ + 0.671, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "UIST'24,October 13-16,2024,Pittsburgh,PA,USA" + }, + { + "type": "table", + "bbox": [ + 0.151, + 0.103, + 0.849, + 0.219 + ], + "angle": 0, + "content": "
System | # Inst. Joints | FPS | Consumer Device | Translation | MPJVE (cm) | Jitter (10² m/s³)
Xsens [45] | 17 | 120 | × | ✓ | - | -
SIP [43] | 6 | 60 | × | ✓ | 7.7 | 3.8
DIP [14] | 6 | 29 | × | × | 8.9 | 30.13
TransPose [49] | 6 | 90 | × | ✓ | 7.1 | 1.4
PIP [48] | 6 | 60 | × | ✓ | 5.9 | 0.24
IMUPoser [28] | 1-3 | 25 | ✓ | × | 12.1 | 1.9
MobilePoser (our work) | 1-3 | 60 | ✓ | ✓ | 10.6 | 0.97
" + }, + { + "type": "table_caption", + "bbox": [ + 0.277, + 0.219, + 0.72, + 0.232 + ], + "angle": 0, + "content": "Table 1: Comparison with key prior work on the DIP-IMU dataset." + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.267, + 0.26, + 0.282 + ], + "angle": 0, + "content": "2 RELATED WORK" + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.288, + 0.449, + 0.304 + ], + "angle": 0, + "content": "2.1 User Digitization with External Sensors" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.305, + 0.483, + 0.459 + ], + "angle": 0, + "content": "Commercial motion capture systems such as OptiTrack [29] and Vicon [41] use specialized hardware, such as multiple calibrated high-speed infrared cameras, to track retroreflective markers attached to a user's body. Such setups are commonly used in games, movies and character animations that require millimeter accuracy and are the gold standard of motion capture. The expensive infrastructure required by commercial systems, makes them impractical for everyday use. Therefore, much research has been devoted to instrumentation-free approaches using monocular cameras. Such approaches generally rely on RGB [9, 13, 36] or depth [27] cameras based computer vision techniques to predict body pose." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.459, + 0.483, + 0.584 + ], + "angle": 0, + "content": "There also exists specialized external hardware for pose tracking in Extended Reality (XR). For example, the HTC Vive [2], PlayStation VR [1] and Oculus Rift [32] track the head, handheld controllers and other limb-borne accessories using external sensor base stations for Virtual Reality (VR) applications. The un-sensed joints are estimated with inverse kinematics [15] or learning-based methods [16, 35]. Other non-optical external approaches for pose estimation include capacitive sensing [50], magnetic fields [31, 33], RF [51], and mechanical linkages [39]." + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.612, + 0.437, + 0.643 + ], + "angle": 0, + "content": "2.2 User Digitization with non-IMU Worn Sensors" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.647, + 0.483, + 0.744 + ], + "angle": 0, + "content": "Wearable sensors provide a portable and flexible alternative to external sensors. For example, MI-Poser [7] uses magnetic tracking in wristbands and AR glasses to estimate upper-body poses. Other works have explored wrist-worn cameras [20, 44], EMG sensors [24], EIT sensors [22], wrist-worn antennas [19] and depth sensor armbands [10]. However, these works focus solely on capturing the motion of specific body parts (e.g., wrist or upper-body)." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.745, + 0.483, + 0.897 + ], + "angle": 0, + "content": "To capture full-body motion, a popular approach is to use body-mounted cameras coupled with computer vision techniques [5, 38]. Other works have explored different sensor technologies such as ultrasonic sensors [42] and RFID [18]. Nevertheless, these works require users to wear sensors they do not already have. Pose-On-The-Go [4] addresses this by estimating full-body pose via extreme sensor fusion, leveraging a phone's front and rear cameras, thus requiring no special instrumentation. However, its computationally expensive and relies heavily on heuristics to power body poses, often resulting in unnatural motions. 
MobilePoser differentiates itself by focusing on full-body pose estimation using power-efficient" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.269, + 0.915, + 0.296 + ], + "angle": 0, + "content": "IMUs already found in consumer devices, such as smartphones, smartwatches, and earbuds." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.308, + 0.895, + 0.324 + ], + "angle": 0, + "content": "2.3 User Digitization with IMU Worn Sensors" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.327, + 0.915, + 0.424 + ], + "angle": 0, + "content": "Commercial motion capture systems, such as Xsens [45], use a large number of inertial sensors (typically 17) strapped to the body to provide high-quality motion capture. These setups consist of homogeneous, high-grade IMUs that are calibrated for noise and have known positions on the body, resulting in a less ill-posed problem compared to using sparse, heterogeneous sensors. However, such an approach is highly inconvenient and intrusive for everyday use." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.424, + 0.915, + 0.576 + ], + "angle": 0, + "content": "To address this limitation, researchers have explored reconstructing human motions from a reduced number of sensors. Works such as SIP [43], DIP [14], PIP [48], TIP [17], and TransPose [49] have demonstrated the feasibility of using only 6 commercial-grade Xsens IMU sensors for full-body motion capture. Works have further explored integrating other input modalities (e.g. UWB [8] and egocentric images [47]) in addition to the 6 IMUs for increased performance. All these approaches leverage the homogeneity and known calibrated positions of the sensors to achieve accurate pose estimation. However, even 6 sensors can be cumbersome for on-the-go applications, especially those that require passive sensing." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.576, + 0.915, + 0.701 + ], + "angle": 0, + "content": "Recent research has investigated even sparser IMU configurations using commodity devices. IMUPoser [28], which is most closely related to our work, performs pose estimation using any combination of smartphone, smartwatch, and earbuds. While IMUPoser tackles the challenges of heterogeneous sensor quality for pose estimation, it lacks global translation due to IMU noise and drift, and contains unrealistic spatio-temporal motion artifacts. Additionally, IMUPoser runs on a laptop at \\(25\\mathrm{Hz}\\), limiting its practicality for real-time mobile applications." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.7, + 0.915, + 0.798 + ], + "angle": 0, + "content": "In contrast, MobilePoser addresses these limitations by demonstrating improved pose estimation accuracy on widely used benchmarks while also estimating global translation (see Table 1). Furthermore, our system is designed to run fully on-device, achieving real-time performance of 60 fps on edge mobile devices. This enables MobilePoser to provide a more practical and accessible solution for on-the-go motion capture using commodity devices." 
+ }, + { + "type": "title", + "bbox": [ + 0.514, + 0.808, + 0.673, + 0.822 + ], + "angle": 0, + "content": "3 MOBILEPOSER" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.827, + 0.915, + 0.897 + ], + "angle": 0, + "content": "Estimating a user's full-body pose from a sparse set of IMU observations is a severely under-constrained problem as it aims to infer a high-dimensional quantity, i.e., the full-body pose, from low-dimensional observations that only capture partial motion at each instrumented point. Moreover, multiple possible solutions could" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.328, + 0.087 + ], + "angle": 0, + "content": "UIST '24, October 13-16, 2024, Pittsburgh, PA, USA" + }, + { + "type": "header", + "bbox": [ + 0.868, + 0.076, + 0.913, + 0.087 + ], + "angle": 0, + "content": "Xu, et al." + }, + { + "type": "image", + "bbox": [ + 0.102, + 0.109, + 0.885, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.329, + 0.916, + 0.4 + ], + "angle": 0, + "content": "Figure 3: MobilePoser system overview. MobilePoser accepts any available subset of IMU data from the user and masks absent devices by setting their values to zero. The IMU data is then fed into two main modules: (1) Pose Estimation, which first estimates joint positions followed by joint rotations, and (2) Translation Estimation, which combines foot-ground contact probabilities with a direct neural network-based approach to regress global velocity. Finally, a Physics Optimizer refines the predicted joint rotations and global translation to ensure they satisfy physical constraints." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.42, + 0.484, + 0.519 + ], + "angle": 0, + "content": "explain the observed data, making it challenging to determine the correct pose. To tackle these challenges, we introduce MobilePoser, a system that leverages data-driven learning and physics-based optimization to estimate accurate and plausible full-body poses and global translations from sparse IMU inputs. Figure 3 provides an overview of our pipeline, which we describe in detail in the following sections." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.546, + 0.238, + 0.561 + ], + "angle": 0, + "content": "3.1 System Input" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.564, + 0.483, + 0.702 + ], + "angle": 0, + "content": "MobilePoser takes as input acceleration and orientation readings from IMUs across any subset of three consumer devices: smartphones, smartwatches, and earbuds. Each of these devices can be placed at different body locations, resulting in various possible combinations. For instance, a smartphone can be stored in the left or right pocket, held in the left or right hand, placed next to the head during a call, or not carried by the user at all. Similarly, smartwatches can be worn on either wrist or not worn at all, while earbuds can be worn, placed in a charging case stored in either pocket, or not carried by the user." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.702, + 0.485, + 0.785 + ], + "angle": 0, + "content": "Following IMUPoser [28], we consider 24 plausible device-location combinations across five body locations: right pocket, left pocket, right wrist, left wrist, and head. These combinations cover the various ways users might carry or wear their devices throughout the day. Regardless of the input device combination, our model expects IMU data from the five predefined body locations." 
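As an illustration of the Section 3.1 input layout, below is a minimal Python sketch (our own naming and location ordering, not the authors' released code), assuming the per-location encoding described in the following paragraph: 3 acceleration values plus a flattened 3×3 orientation matrix per location, with absent devices zeroed out.

```python
import numpy as np

# Five candidate on-body locations as listed in Section 3.1; the ordering
# used inside the released model is an implementation detail.
LOCATIONS = ["right_pocket", "left_pocket", "right_wrist", "left_wrist", "head"]
VALUES_PER_LOCATION = 12  # 3 acceleration values + flattened 3x3 orientation matrix

def build_input_vector(readings):
    """readings: dict mapping location name -> (acc[3], rot[3][3]) for the
    devices currently present; every absent location stays masked at zero."""
    x = np.zeros(len(LOCATIONS) * VALUES_PER_LOCATION, dtype=np.float32)
    for i, loc in enumerate(LOCATIONS):
        if loc in readings:
            acc, rot = readings[loc]
            base = i * VALUES_PER_LOCATION
            x[base:base + 3] = np.asarray(acc, dtype=np.float32)
            x[base + 3:base + 12] = np.asarray(rot, dtype=np.float32).reshape(-1)
    return x

# Example: only a smartwatch on the left wrist is active.
x = build_input_vector({"left_wrist": ([0.0, 9.8, 0.0], np.eye(3))})
assert x.shape == (60,)
```

Zero-masking keeps the input fixed at 60 dimensions for every one of the 24 device-location combinations, which is what allows a single unified model to serve all configurations.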
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.786, + 0.484, + 0.897 + ], + "angle": 0, + "content": "The IMU signal at each location consists of acceleration (3 values) and orientation (a \\(3 \\times 3\\) rotation matrix), resulting in a total of 12 IMU values per location. Across all five locations, this yields an input vector \\(x \\in \\mathbb{R}^{60}\\). However, since at any given time only a subset of 1-3 devices may be present, data from absent devices is masked and set to zero. This masking approach allows us to build a unified model that can handle the varying number of available devices and their changing on-body location seamlessly. This further eliminates" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.42, + 0.916, + 0.448 + ], + "angle": 0, + "content": "the need for training separate models for each possible combination, making the system more practical and efficient." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.46, + 0.776, + 0.475 + ], + "angle": 0, + "content": "3.2 Full-Body Pose Estimation" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.479, + 0.916, + 0.606 + ], + "angle": 0, + "content": "To learn a mapping from IMU input to full-body pose, we employ a data-driven, multi-stage neural network approach. Specifically, our pose estimation network consists of two submodules: a Joint predictor \\((\\mathcal{F}^{joint})\\) and a Rotation predictor \\((\\mathcal{F}^{\\theta})\\). In particular, \\(\\mathcal{F}^{joint}\\) estimates joint positions as an intermediate task and \\(\\mathcal{F}^{\\theta}\\) solves for the joint angle orientations. Both submodules use a bidirectional LSTM (bi-LSTM) to model both spatial and temporal information [14]. We input data into both submodules in a sliding-window fashion with window length \\(N\\)." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.612, + 0.916, + 0.737 + ], + "angle": 0, + "content": "3.2.1 Joint Pose Estimation \\((\\mathcal{F}^{joint})\\). This module estimates the joint positions from a sequence of IMU measurements. We explicitly estimate joint positions as an intermediate step, as it helps extract useful information from linear accelerations due to their linear correlation with joint positions [49]. The input to \\(\\mathcal{F}^{joint}\\) is \\(x^{imu}(t) = [x_{t-N}, \\ldots, x_t]\\), where \\(t\\) is the current time step and \\(N\\) is the time window length. The outputs are the root (pelvis) relative 3D positions of the 24 SMPL body joints [25], \\(\\pmb{p}(t) = [\\pmb{p}_{t-N}, \\ldots, \\pmb{p}_t] \\in \\mathbb{R}^{N \\times 72}\\). The loss function used to train this network is:" + }, + { + "type": "equation", + "bbox": [ + 0.648, + 0.742, + 0.913, + 0.759 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\text{joint}} = \\left\\| \\mathbf{p} - \\mathbf{p}_{GT} \\right\\|_{2}^{2} \\tag{1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.763, + 0.915, + 0.791 + ], + "angle": 0, + "content": "where the subscript \\(GT\\) denotes the ground truth and \\(p\\) represents the full-body SMPL joint positions." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.799, + 0.916, + 0.897 + ], + "angle": 0, + "content": "3.2.2 Joint Rotation and Body Mesh Estimation \\((\\mathcal{F}^{\\theta})\\). Here we employ a neural kinematic estimator to regress joint rotations from the previously estimated positions. We concatenate the joint coordinates from \\(\\mathcal{F}^{joint}\\) with IMU measurements, which serves as the input to \\(\\mathcal{F}^{\\theta}\\). 
Note, while the SMPL body encodes 24 joints, only 18 are relevant from a rotation prediction perspective as the fingers, wrist and toes are independent of the on-body IMUs and" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.65, + 0.087 + ], + "angle": 0, + "content": "MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices" + }, + { + "type": "header", + "bbox": [ + 0.671, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "UIST '24, October 13-16, 2024, Pittsburgh, PA, USA" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.107, + 0.482, + 0.149 + ], + "angle": 0, + "content": "are hence set to identity rotation matrices [49]. The outputs of the network are the 18 root relative joint orientations represented as 6D rotations: \\(\\pmb{\\theta}(t) = [\\pmb{\\theta}_{t-N},\\dots,\\pmb{\\theta}_t] \\in \\mathbb{R}^{N \\times 108}\\)." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.149, + 0.483, + 0.246 + ], + "angle": 0, + "content": "Our joint rotation loss consists of three terms: \\(\\mathcal{L}_{ori}\\), \\(\\mathcal{L}_{pos}\\), \\(\\mathcal{L}_{jerk}\\). The loss term \\(\\mathcal{L}_{ori}\\) is a standard L2 loss from the ground truth joint rotations. The term \\(\\mathcal{L}_{pos}\\) penalizes error accumulating along the kinematic chain. Finally, \\(\\mathcal{L}_{jerk}\\) promotes temporally smooth predictions, where \\(jerk(\\theta) = \\theta_t - 3\\theta_{t-1} + 3\\theta_{t-2} - \\theta_{t-3}\\) is the third-order finite difference that computes the jerk of a signal \\(\\theta\\) at time step \\(t\\), penalizing the deviation between neighboring frames [49]." + }, + { + "type": "text", + "bbox": [ + 0.099, + 0.246, + 0.483, + 0.26 + ], + "angle": 0, + "content": "Our combined joint rotation loss function can be represented as," + }, + { + "type": "equation", + "bbox": [ + 0.205, + 0.265, + 0.483, + 0.282 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\theta} = \\mathcal{L}_{\\text{ori}} + \\mathcal{L}_{\\text{pos}} + \\lambda \\mathcal{L}_{\\text{jerk}} \\tag{2}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.197, + 0.284, + 0.483, + 0.302 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\text{ori}} = \\left\\| \\theta - \\theta_{GT} \\right\\|_{2}^{2} \\tag{3}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.194, + 0.304, + 0.483, + 0.321 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\text{pos}} = \\left\\| \\mathrm{FK}(\\theta) - \\mathbf{p}_{GT} \\right\\|_{2}^{2} \\tag{4}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.188, + 0.324, + 0.483, + 0.361 + ], + "angle": 0, + "content": "\\[\n\\mathcal{L}_{\\text{jerk}} = \\sum_{t=1}^{T} jerk(\\theta) \\tag{5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.366, + 0.483, + 0.421 + ], + "angle": 0, + "content": "where \\( \\mathrm{FK}(\\cdot) \\) is the forward kinematics function that computes joint coordinates from joint rotations. Given the joint rotations, the parametric SMPL body model generates a corresponding body mesh with 6890 vertices." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.433, + 0.376, + 0.447 + ], + "angle": 0, + "content": "3.3 Global Translation Estimation" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.452, + 0.483, + 0.576 + ], + "angle": 0, + "content": "Translation estimation from IMUs is challenging as they lack direct distance measurements. 
Moreover, IMUs are prone to noise and biases, which causes techniques such as double-integration of acceleration to rapidly accumulate errors [46]. Therefore, inspired by prior work [23, 48, 49], we estimate per-frame velocity of the root joint using two submodules: a foot-ground contact based estimator \\((v_{f})\\) and a neural network based root velocity estimator \\((v_{e})\\). We fuse the output of the two submodules to obtain a final estimate of global translation." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.584, + 0.483, + 0.792 + ], + "angle": 0, + "content": "3.3.1 Foot-Ground Contact based Root Velocity \\((v_{f})\\). Here we estimate the probability of each foot contacting the ground independently using a bi-LSTM network. The input to the model is the concatenated vector of joint positions and IMU measurements. The output of the network is the likelihood that each foot is contacting the ground, denoted as \\(c_{foot} = [c_{lfoot}, c_{rfoot}] \\in \\mathbb{R}^2\\). The foot with the higher foot-ground contact probability is defined as the supporting foot, \\(s = \\arg\\max \\{c_{lfoot}, c_{rfoot}\\}\\). The root velocity, \\(v_{f}(t) \\in \\mathbb{R}^{3}\\), is then computed as the coordinate difference of the supporting foot between consecutive frames. This approach helps capture natural body motions, as movement is significantly influenced by the supporting foot's dynamics [37]. For example, when walking, the body's movement is propelled forward and stabilized by the foot contacting the ground. The network is trained using binary cross-entropy loss." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.799, + 0.483, + 0.897 + ], + "angle": 0, + "content": "3.3.2 Neural Network based Root Velocity \\((v_{e})\\). While the supporting foot contact based method yields plausible human movement, it inherently fails when both feet are not contacting the ground (e.g., when running or jumping). To accommodate such cases, we estimate per-frame root velocity directly using a neural network. We again use the predicted joint coordinates and IMU measurements as input. Compared to previous submodules that use a bi-LSTM for" + }, + { + "type": "list", + "bbox": [ + 0.082, + 0.584, + 0.483, + 0.897 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.107, + 0.916, + 0.163 + ], + "angle": 0, + "content": "prediction, this module uses a unidirectional LSTM due to its capacity to capture longer historical context. The output is per-frame root velocity, denoted as \\( v_{e}(t) \\in \\mathbb{R}^{3} \\). The network is trained using a cumulative L2 loss [49]." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.171, + 0.916, + 0.353 + ], + "angle": 0, + "content": "3.3.3 Module Fusion. Both modules offer different trade-offs in terms of predicting translation. The supporting foot method provides more realistic estimates by leveraging human kinematics but fails when both feet are off the ground. On the other hand, directly estimating root velocity is more general but is highly prone to unnatural movements such as foot sliding [52]. To achieve the benefits of both, we adopt a heuristic-based fusion approach inspired by TransPose [49]. In summary, when the foot contact probability \\( q \\) is higher than an upper-threshold \\( \\overline{q} \\), we are confident of ground contact by a foot and hence we rely on \\( v_{f} \\) for translation estimation. When the foot contact probability is below a lower-threshold, \\( \\underline{q} \\), we rely on \\( v_{e} \\). 
For intermediate probabilities, we fuse both velocity estimations using a weighted sum to output the final global velocity estimate \\( v \\):" + }, + { + "type": "equation", + "bbox": [ + 0.643, + 0.358, + 0.915, + 0.391 + ], + "angle": 0, + "content": "\\[\nv = \\frac{q - \\bar{q}}{\\underline{q} - \\bar{q}} v_{e} + \\frac{q - \\underline{q}}{\\bar{q} - \\underline{q}} v_{f} \\tag{6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.398, + 0.866, + 0.413 + ], + "angle": 0, + "content": "Following previous work [49], we use \\( \\underline{q} = 0.5 \\) and \\( \\overline{q} = 0.9 \\)." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.426, + 0.78, + 0.441 + ], + "angle": 0, + "content": "3.4 Physics-Aware Refinement" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.444, + 0.915, + 0.694 + ], + "angle": 0, + "content": "Our pose and translation estimation networks output the user's global pose based on a history of IMU measurements. When trained on sufficiently large amounts of data, the full-body pose estimation and global translation estimation neural networks learn the human motion manifold and produce realistic poses. However, despite the best modeling efforts, the outputs may still contain inter-mesh penetration, temporal artifacts such as jitter, foot-floor penetration and foot skating. To address these issues, we add an off-the-shelf physics motion optimizer [48]. The physics optimizer uses two proportional derivative (PD) controllers to compute the desired acceleration of the simulated character that best reproduces the estimated pose while satisfying physical constraints, such as the equation of motion [12]. The inputs to the physics optimizer are the estimated joint angles \\(\\theta\\), the foot-ground contact probabilities \\(c_{foot}\\), and the neural network based root velocity \\(v_{e}\\). The outputs are the optimized joint angles and global translation with reduced jitter and foot-ground penetration (Figure 4). For a detailed overview of the physics optimizer, we refer readers to PIP [48]." + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.717, + 0.915, + 0.852 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.864, + 0.915, + 0.893 + ], + "angle": 0, + "content": "Figure 4: Demonstration of the physics optimizer's ability to reduce foot-ground penetration." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.328, + 0.088 + ], + "angle": 0, + "content": "UIST '24, October 13-16, 2024, Pittsburgh, PA, USA" + }, + { + "type": "header", + "bbox": [ + 0.868, + 0.076, + 0.913, + 0.087 + ], + "angle": 0, + "content": "Xu, et al." + }, + { + "type": "title", + "bbox": [ + 0.083, + 0.106, + 0.292, + 0.12 + ], + "angle": 0, + "content": "3.5 Real-time Inference" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.125, + 0.482, + 0.194 + ], + "angle": 0, + "content": "We implement proof-of-concept applications in iOS, using an Apple iPhone 15 Pro, Apple Watch Series 9 and Apple AirPods Pro. The iPhone, Apple Watch and AirPods sample IMU data at 60, 60 and \\(25\\mathrm{Hz}\\) respectively. For uniformity, we convert all the IMU data to \\(60\\mathrm{Hz}\\) by upsampling the AirPods." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.194, + 0.482, + 0.332 + ], + "angle": 0, + "content": "We employ the active device selection strategy proposed by IMUPoser [28], wherein the UWB and inertial data are used to track the active devices and their on-body locations. 
For initial prototyping, the Apple Watch and AirPods communicate over Bluetooth to the iPhone, which streams data to a MacBook Air 2022 via socket. Post connection, a small calibration step is performed to align the IMU measurements with the training data, similar to prior work [14, 28, 49]. Following the setup, data is streamed to the laptop for pre-processing, inference and then relayed to Unity applications for visualization." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.332, + 0.482, + 0.403 + ], + "angle": 0, + "content": "To further prototype an on-device edge model, we convert our trained PyTorch model into CoreML with mixed precision quantization and evaluate its performance. On an iPhone 15 Pro, our model incurs \\(\\sim 14\\mathrm{ms}\\) model inference time running at \\(60\\mathrm{Hz}\\), capped by the input IMU sampling rate." + }, + { + "type": "title", + "bbox": [ + 0.083, + 0.413, + 0.472, + 0.428 + ], + "angle": 0, + "content": "4 DATA SYNTHESIS AND MODEL TRAINING" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.432, + 0.483, + 0.488 + ], + "angle": 0, + "content": "Model training requires a large collection of synchronized IMU measurements and corresponding SMPL body poses. We leverage the AMASS [26] MoCap dataset, which provides an extensive collection of such data (~40 hours), including translation." + }, + { + "type": "title", + "bbox": [ + 0.083, + 0.499, + 0.345, + 0.515 + ], + "angle": 0, + "content": "4.1 Full-Body Pose Estimation" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.517, + 0.483, + 0.656 + ], + "angle": 0, + "content": "Our models expect IMU measurements as input. We synthesize IMU data following the approach proposed in DIP [14]. In summary, we place virtual sensors on the corresponding SMPL mesh vertices (left and right wrists, left and right pockets, and the head) and obtain joint rotations via limb orientations, while acceleration values are computed using finite differences. During training, we scale down the acceleration by a factor of 30 \\(m/s^2\\), such that its values are on a similar scale to orientations, for better learning. Of note, we do not normalize our IMU measurements to a root joint (e.g., the pelvis), as the number of available devices can vary." + }, + { + "type": "title", + "bbox": [ + 0.083, + 0.668, + 0.376, + 0.682 + ], + "angle": 0, + "content": "4.2 Global Translation Estimation" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.686, + 0.483, + 0.838 + ], + "angle": 0, + "content": "The translation estimation networks require (1) binary labels for foot-ground contact states and (2) per-frame root velocity values. To generate foot-ground contact states, we assume that a foot in contact with the ground displays very little movement between frames. Therefore, when the movement of one foot between consecutive frames is less than a threshold \\( u \\), we consider it to be contacting the ground. We set \\( u = 0.008 \\), following previous work [49]. To train \\( v_{e} \\), we require per-frame root velocities. Since the AMASS dataset provides root position data, we can compute root velocities as the coordinate difference of the root position between consecutive frames."
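The Section 4.2 labeling rules reduce to a few lines of array arithmetic. A minimal sketch, assuming (T, 3) AMASS trajectories and the u = 0.008 threshold from the text (the function names are illustrative, not the released pre-processing scripts):

```python
import numpy as np

U = 0.008  # per-frame foot-movement threshold from the text (following [49])

def foot_contact_labels(foot_pos):
    """foot_pos: (T, 3) positions of one foot; returns (T-1,) binary labels,
    1.0 where the foot moved less than U between consecutive frames."""
    per_frame_motion = np.linalg.norm(np.diff(foot_pos, axis=0), axis=1)
    return (per_frame_motion < U).astype(np.float32)

def root_velocities(root_pos):
    """root_pos: (T, 3) root joint positions from AMASS; returns the (T-1, 3)
    per-frame velocities used as regression targets for v_e."""
    return np.diff(root_pos, axis=0)
```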
+ }, + { + "type": "title", + "bbox": [ + 0.083, + 0.85, + 0.371, + 0.866 + ], + "angle": 0, + "content": "4.3 Training Setup and Procedure" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.868, + 0.482, + 0.897 + ], + "angle": 0, + "content": "We train our models on an NVIDIA A40 GPU, which takes roughly a day for all modules and device-combinations. In total, our model has" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.107, + 0.914, + 0.163 + ], + "angle": 0, + "content": "\\(\\sim 6.7M\\) trainable parameters. Each module is trained separately using a batch size of 256 and the Adam optimizer [21] with a learning rate of \\(\\mathrm{lr} = 10^{-3}\\) for 80 epochs. We also apply gradient clipping with a norm of 1 to prevent the gradients from exploding." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.163, + 0.914, + 0.22 + ], + "angle": 0, + "content": "During training of \\(\\mathcal{F}^{\\theta}\\), \\(v_{e}\\), and \\(v_{f}\\), we add Gaussian noise with \\(\\sigma = 0.04\\) to the joint positions to prevent overfitting and deal with prediction errors from \\(\\mathcal{F}^{joint}\\). We empirically set \\(\\lambda = 10^{-5}\\) when training \\(\\mathcal{F}^{\\theta}\\), to encourage temporally smooth predictions." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.232, + 0.661, + 0.246 + ], + "angle": 0, + "content": "5 EVALUATION" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.251, + 0.915, + 0.307 + ], + "angle": 0, + "content": "We systematically isolate and analyze the efficacy of MobilePoser across different datasets, evaluation metrics and protocols. We show both qualitative and quantitative results, and also run ablation studies to evaluate our translation estimation design choices." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.319, + 0.629, + 0.333 + ], + "angle": 0, + "content": "5.1 Datasets" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.337, + 0.914, + 0.365 + ], + "angle": 0, + "content": "We evaluate MobilePoser on three real-world, inertial datasets, summarized in Table 2:" + }, + { + "type": "text", + "bbox": [ + 0.542, + 0.369, + 0.914, + 0.437 + ], + "angle": 0, + "content": "- DIP-IMU [14] contains data from 10 participants, collected using commercial-grade Xsens [45] IMUs at \\(60\\mathrm{Hz}\\). It includes a rich variety of activities such as arm raises, stretches, lunges, squats, and punches. However, DIP-IMU does not contain global translation data." + }, + { + "type": "text", + "bbox": [ + 0.542, + 0.438, + 0.915, + 0.492 + ], + "angle": 0, + "content": "- TotalCapture [40] provides real IMU measurements with ground-truth pose and translation, captured using commercial Xsens IMUs at \\(60\\mathrm{Hz}\\). Following PIP [48], we re-calibrate the acceleration measurements to account for constant bias." + }, + { + "type": "text", + "bbox": [ + 0.542, + 0.493, + 0.922, + 0.547 + ], + "angle": 0, + "content": "- IMUPoser [28] is collected from 10 participants using consumer-grade devices: an iPhone 11 Pro, Apple Watch Series 6, and AirPods, at \\(25\\mathrm{Hz}\\). It provides ground-truth pose and global translation data." + }, + { + "type": "list", + "bbox": [ + 0.542, + 0.369, + 0.922, + 0.547 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.565, + 0.776, + 0.581 + ], + "angle": 0, + "content": "5.2 Full-Body Pose Estimation" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.584, + 0.914, + 0.612 + ], + "angle": 0, + "content": "5.2.1 Evaluation Metrics. 
Like prior work, we use the following evaluation metrics for pose estimation (lower is better for all):" + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.615, + 0.914, + 0.643 + ], + "angle": 0, + "content": "- Mean Per Joint Rotation Error (MPJRE): Measure of mean angular error across all root aligned joints in degrees \\((^{\\circ})\\)." + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.643, + 0.915, + 0.683 + ], + "angle": 0, + "content": "- Mean Per Joint Position Error (MPJPE): Measure of mean Euclidean distance error across all root aligned joints in centimeters (cm)." + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.684, + 0.915, + 0.725 + ], + "angle": 0, + "content": "- Mean Per Joint Vertex Error (MPJVE): Measure of mean Euclidean distance error across all root aligned vertices of the SMPL body mesh in centimeters (cm)." + }, + { + "type": "text", + "bbox": [ + 0.541, + 0.725, + 0.915, + 0.753 + ], + "angle": 0, + "content": "- Mean Per Joint Jitter (Jitter): Measure of mean jerk across all body joints of the predicted motion in \\( m / s^3 \\)." + }, + { + "type": "list", + "bbox": [ + 0.541, + 0.615, + 0.915, + 0.753 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.756, + 0.915, + 0.785 + ], + "angle": 0, + "content": "We use MPJVE as our primary metric of evaluation for ease of comparison with prior work [28]." + }, + { + "type": "table", + "bbox": [ + 0.533, + 0.805, + 0.897, + 0.863 + ], + "angle": 0, + "content": "
Dataset | Capture Device | Translation | Data FPS
DIP-IMU | Commercial | × | 60 Hz
TotalCapture | Commercial | ✓ | 60 Hz
IMUPoser | Consumer | ✓ | 25 Hz
" + }, + { + "type": "table_caption", + "bbox": [ + 0.515, + 0.864, + 0.914, + 0.876 + ], + "angle": 0, + "content": "Table 2: Real-world IMU datasets for MobilePoser Evaluation." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.65, + 0.087 + ], + "angle": 0, + "content": "MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices" + }, + { + "type": "header", + "bbox": [ + 0.671, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "UIST'24,October 13-16,2024,Pittsburgh,PA,USA" + }, + { + "type": "image", + "bbox": [ + 0.109, + 0.105, + 0.457, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.083, + 0.302, + 0.483, + 0.346 + ], + "angle": 0, + "content": "Figure 5: Comparison of MobilePoser's Full-Body Pose Estimation Error across different Evaluation Protocols on the DIP-IMU, IMUPoser and TotalCapture dataset respectively." + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.37, + 0.483, + 0.412 + ], + "angle": 0, + "content": "5.2.2 Evaluation Protocol. We outline three evaluation protocols for training and fine-tuning to evaluate MobilePoser's efficacy across different data sources and noise profiles." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.415, + 0.483, + 0.441 + ], + "angle": 0, + "content": "- Base Model: We train our model on the synthetic data generated on the AMASS dataset." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.443, + 0.482, + 0.497 + ], + "angle": 0, + "content": "- Finetune DIP-IMU: Like prior work, we train on AMASS and then fine-tune on 8 DIP-IMU participants. The 2 holdout participants are used for testing the Finetune DIP-IMU model on the DIP-IMU dataset." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.499, + 0.482, + 0.553 + ], + "angle": 0, + "content": "- Finetune IMUPoser: We train on AMASS and fine-tune on the first 8 IMUPoser participants. The 2 holdout participants are used for testing the Finetune IMUPoser model on the IMUPoser dataset." + }, + { + "type": "list", + "bbox": [ + 0.11, + 0.415, + 0.483, + 0.553 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.562, + 0.483, + 0.687 + ], + "angle": 0, + "content": "5.2.3 Accuracy across Datasets. Figure 5 shows our full-body pose estimation accuracy for all three protocols across the three datasets listed in Section 5.1. Averaged across all three datasets, the MPJVE for the Base Model, Finetune DIP-IMU and Finetune IMUPoser protocols are 11.89, 11.73 and \\(11.33\\mathrm{cm}\\) respectively. It is interesting to note that the addition of commercial-grade IMU data (Finetune DIP-IMU) only improves accuracy by \\(1.3\\%\\) over the base model, while the addition of noisy IMU data from consumer devices (Finetune IMUPoser) results in a bigger improvement of \\(4.7\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.695, + 0.483, + 0.82 + ], + "angle": 0, + "content": "5.2.4 Accuracy across Activities. We further analyze results on different activities on the IMUPoser dataset, as it provides activity label meta-data. MobilePoser's accuracy generalizes across most everyday activity contexts: the error (MPJVE) for locomotion is 8.2 cm (walking 7.6 cm, jogging 8.8 cm), exercises is 10 cm (kicking: 7.5 cm, jumping jacks: 11.1 cm, boxing: 11.5 cm), sitting is 11.5 cm and freestyle motions such as tennis and basketball are 9.1 cm and 11.7 cm respectively. The accuracy degrades for postures with the user lying/facing down, e.g. 
push-ups have a higher error of 16.1 cm." + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.827, + 0.483, + 0.897 + ], + "angle": 0, + "content": "5.2.5 Comparison with prior work. To aid in direct comparison with prior work [14, 28, 48, 49], we now make use of the Finetune DIP-IMU evaluation protocol, that is, training a base model on the synthetic IMU data from AMASS and fine-tuning it on the 8 participants from the DIP-IMU dataset. Tables 1 and 3 offer a quantitative" + }, + { + "type": "list", + "bbox": [ + 0.082, + 0.695, + 0.483, + 0.897 + ], + "angle": 0, + "content": null + }, + { + "type": "table", + "bbox": [ + 0.541, + 0.103, + 0.888, + 0.19 + ], + "angle": 0, + "content": "
System | # Inst. Joints | MPJRE | MPJVE | Jitter
DIP | 6 | 17.2° | 11.2 | 3.62
TransPose | 6 | 12.8° | 7.4 | 0.95
PIP | 6 | 12.1° | 6.5 | 0.20
IMUPoser | 1-3 | 25.6° | 15.4 | 1.30
MobilePoser | 1-3 | 23.7° | 12.6 | 0.55
" + }, + { + "type": "table_caption", + "bbox": [ + 0.515, + 0.191, + 0.916, + 0.217 + ], + "angle": 0, + "content": "Table 3: Comparison with key prior work on the TotalCapture dataset." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.258, + 0.915, + 0.368 + ], + "angle": 0, + "content": "comparison against key prior work, evaluated on the DIP-IMU and TotalCapture, dataset respectively. Given that our system targets a very sparse configuration of IMUs (1-3), it is unsurprising that we perform worse than systems utilizing 6 IMUs, strategically placed around the body. On the DIP-IMU and TotalCapture dataset, compared to IMUPoser, which considers the same device-location combinations, we perform significantly better displaying a \\(12.4\\%\\) and \\(18.2\\%\\) decrease in vertex error respectively." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.368, + 0.915, + 0.535 + ], + "angle": 0, + "content": "On the IMUPoser dataset, Figure 7 (A) provides a detailed breakdown of accuracy for different on-body device locations. Averaging across the 1, 2 and 3 device conditions, MobilePoser outperforms IMUPoser by \\(24.1\\%\\), \\(14.2\\%\\) and \\(8.7\\%\\) respectively. Furthermore, Figure 7 (B) provides an accuracy breakdown for the instrumented and non-instrumented joints in comparison with IMUPoser. If a limb has an IMU placed on any part, we consider all the joints pertaining to it as instrumented joints, while the rest are marked as non-instrumented. MobilePoser is \\(18.1\\%\\) and \\(17.4\\%\\) better than IMUPoser for predicting instrumented and non-instrumented joints respectively. This can be seen in Figure 6 which depicts a visual comparison of our pose estimation with IMUPoser." + }, + { + "type": "image", + "bbox": [ + 0.546, + 0.553, + 0.885, + 0.852 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.864, + 0.915, + 0.892 + ], + "angle": 0, + "content": "Figure 6: Qualitative comparisons between our method and IMUPoser on the DIP-IMU and IMUPoser dataset." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.328, + 0.087 + ], + "angle": 0, + "content": "UIST '24, October 13-16, 2024, Pittsburgh, PA, USA" + }, + { + "type": "header", + "bbox": [ + 0.868, + 0.076, + 0.913, + 0.087 + ], + "angle": 0, + "content": "Xu, et al." + }, + { + "type": "image", + "bbox": [ + 0.087, + 0.109, + 0.913, + 0.382 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.398, + 0.913, + 0.427 + ], + "angle": 0, + "content": "Figure 7: MPJVE comparison between IMUPoser and MobilePoser (our system) on the IMUPoser Dataset for: (A) Different on-body device combinations (B) Instrumented vs Non Instrumented joints." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.447, + 0.377, + 0.46 + ], + "angle": 0, + "content": "5.3 Global Translation Estimation" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.465, + 0.483, + 0.561 + ], + "angle": 0, + "content": "5.3.1 Evaluation Protocol. We evaluate our Global Translation Estimation module on the TotalCapture and IMUPoser datasets, as DIP-IMU lacks translation data. Like prior work [48, 49], we use the Finetune DIP-IMU protocol (Section 5.2.2), that is we train on AMASS and fine-tune on 8 participants of DIP-IMU to track the Root Translation Error (Euclidean norm of the cumulative distance errors within 1 second)." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.571, + 0.483, + 0.682 + ], + "angle": 0, + "content": "5.3.2 Accuracy across Datasets and Body Regions. On the Total-Capture and IMUPoser dataset, our mean root translation error across all device combinations is 27.55 and \\(17.63\\mathrm{cm}\\) respectively. Interestingly, for both IMUPoser and TotalCapture datasets, we observe only a slight decrease in error when increasing the number of devices from one to two \\((6.1\\%)\\) and no significant improvement \\((4.0\\%)\\) when increasing from two devices to three. Analysing the error across different body regions for the single device scenario" + }, + { + "type": "list", + "bbox": [ + 0.082, + 0.465, + 0.483, + 0.682 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.083, + 0.705, + 0.482, + 0.826 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.837, + 0.483, + 0.893 + ], + "angle": 0, + "content": "Figure 8: (A) Comparison of cumulative translation error for different instrumented joints on the IMUPoser and Total-Capture dataset. (B) Evaluation of cumulative distance errors with respect to time." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.448, + 0.916, + 0.53 + ], + "angle": 0, + "content": "(Figure 8) (A), we see that a device in the pocket has a much lower error \\((14.8\\mathrm{cm})\\) compared to that on the wrist \\((25.7\\mathrm{cm})\\) or the head \\((29.7\\mathrm{cm})\\). This can be attributed to the legs capturing most of the locomotion data during translation, resulting in marginal gains from sensors on the upper-body. Figure 8 (B) shows the the cumulative distance error over time." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.539, + 0.916, + 0.664 + ], + "angle": 0, + "content": "5.3.3 Ablation Study. We perform ablation studies to understand the impact of key components in our system and their effects on performance. At the core of our system lies a subtle yet powerful concept: higher-order digitization (e.g., body pose) improves lower-order digitizations (e.g., steps). To quantify this idea, we run an ablation study of our translation estimation technique using both IMU data and the corresponding full-body pose inferred from it versus using only IMU data. Figure 9 summarizes our results. Our IMU-only, direct regression has an error of \\(21.4\\mathrm{cm}\\) across both" + }, + { + "type": "image", + "bbox": [ + 0.554, + 0.683, + 0.876, + 0.852 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.864, + 0.915, + 0.893 + ], + "angle": 0, + "content": "Figure 9: Benefits of using high-order digitization (i.e., IMU inferred poses) for estimating global translation." 
+ } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.65, + 0.087 + ], + "angle": 0, + "content": "MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices" + }, + { + "type": "header", + "bbox": [ + 0.671, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "UIST'24,October 13-16,2024,Pittsburgh,PA,USA" + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.105, + 0.283, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.283, + 0.105, + 0.481, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.083, + 0.34, + 0.483, + 0.368 + ], + "angle": 0, + "content": "Figure 10: Example indoor navigation application where MobilePoser digitizes multiple users within an office space." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.395, + 0.481, + 0.422 + ], + "angle": 0, + "content": "datasets, while our integrated \\((\\mathrm{IMU} + \\mathrm{IMU}\\) inferred pose) approach decreases error by \\(29.4\\%\\) to \\(15.1~\\mathrm{cm}\\)." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.423, + 0.483, + 0.589 + ], + "angle": 0, + "content": "Building on the multi-stage architecture, we further evaluate the impact of two additional components: jerk loss and physics refinement. These elements were designed to enhance motion smoothness and physical plausibility. For the IMUPoser dataset, the jerk loss reduces jitter by \\(23.9\\%\\) and translation error by \\(3.33\\%\\), but increases mean pose error by \\(0.05\\%\\). Further, the physics-aware refinement reduces jitter by \\(29.7\\%\\) and translation error by \\(0.4\\%\\), but increases the mean pose error by \\(0.7\\%\\). The negligible increase in mean pose error is expected, as it may occasionally over-smooth the motion. This phenomenon is also seen in the PIP [48]. We believe that significant improvements in jitter and translation far outweigh the minimal increase in pose error, resulting in a more realistic motion." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.597, + 0.483, + 0.736 + ], + "angle": 0, + "content": "5.3.4 Comparison with prior work. To the best of our knowledge, no other works have explored both full-body pose and translation from such a sparse set of commodity IMUs. IMUPoser [28], which also targets consumer devices, does not estimate global translation. On the TotalCapture dataset, TransPose (6 IMUs) has a translation error of \\(12.8\\mathrm{cm}\\) while that of MobilePoser is \\(19.9\\mathrm{cm}\\) when a single IMU device is placed in the pocket. Unsurprisingly, a commercial grade, 6 IMU-based system has higher accuracy due to their waist and knee mounted sensors, which capture larger ranges of locomotion compared to devices carried in the pocket." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.749, + 0.248, + 0.763 + ], + "angle": 0, + "content": "6 EXAMPLE USES" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.767, + 0.483, + 0.837 + ], + "angle": 0, + "content": "MobilePoser enables full-body pose estimation with global motion tracking using devices that users already own, opening up a wide range of novel applications. This section showcases three proof-of-concept applications in indoor navigation, gaming, and healthcare to illustrate MobilePoser's unique capabilities and potential impact." 
+ }, + { + "type": "title", + "bbox": [ + 0.083, + 0.85, + 0.416, + 0.866 + ], + "angle": 0, + "content": "6.1 Indoor Localization and Navigation" + }, + { + "type": "text", + "bbox": [ + 0.083, + 0.868, + 0.482, + 0.897 + ], + "angle": 0, + "content": "To demonstrate MobilePoser's potential in this domain, we scan an office space using the PolyCam [34] LiDAR scanner app with" + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.105, + 0.73, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.73, + 0.105, + 0.912, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.518, + 0.257, + 0.73, + 0.409 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.73, + 0.257, + 0.912, + 0.409 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.514, + 0.422, + 0.914, + 0.463 + ], + "angle": 0, + "content": "Figure 11: In this table tennis game users can move around the table freely and use their wrist-instrumented hand to control their racket." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.488, + 0.915, + 0.627 + ], + "angle": 0, + "content": "an Apple iPhone 15 Pro. As shown in Figure 10, multiple users walk through the virtual office space, with their interactions and movements seamlessly digitized and represented in real-time. Here, one user has a phone in their pocket and a watch on their wrist, while the other two only have a phone in their pocket. By leveraging the IMUs in these consumer devices, MobilePoser enables accurate indoor navigation and localization without the need for additional infrastructure or specialized hardware. This opens up exciting possibilities for applications such as indoor way finding, context-aware virtual assistants, and immersive virtual tours." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.64, + 0.787, + 0.656 + ], + "angle": 0, + "content": "6.2 Mobile Gaming Experiences" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.658, + 0.915, + 0.796 + ], + "angle": 0, + "content": "To showcase this potential, we developed a virtual table tennis game (Figure 11) that allows users to play remotely with others, similar to how Nintendo games are played in front of a TV. Each player has a phone in their pocket and a watch on the dominant (left) hand, which is controlling the racket. Players can freely move within their local space to control their avatars, adding a new level of physical interaction to the gaming experience. MobilePoser's ability to track full-body movements using everyday devices eliminates the need for specialized controllers, making immersive gaming experiences more accessible to a wider audience." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.809, + 0.729, + 0.823 + ], + "angle": 0, + "content": "6.3 Fitness and Wellness" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.827, + 0.914, + 0.897 + ], + "angle": 0, + "content": "MobilePoser has the potential to revolutionize fitness tracking and rehabilitation by providing accurate, real-time feedback on a user's movements and poses without the need for external sensors or camera setups. 
This enables users to monitor their exercise form, track progress, and receive personalized guidance using the devices" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.327, + 0.087 + ], + "angle": 0, + "content": "UIST '24, October 13-16, 2024, Pittsburgh, PA, USA" + }, + { + "type": "header", + "bbox": [ + 0.868, + 0.077, + 0.912, + 0.087 + ], + "angle": 0, + "content": "Xu, et al." + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.104, + 0.48, + 0.326 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.34, + 0.483, + 0.381 + ], + "angle": 0, + "content": "Figure 12: MobilePoser's full-body pose and locomotion can be used to automatically detect and count exercise repetitions, better estimate calories and monitor form." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.405, + 0.483, + 0.531 + ], + "angle": 0, + "content": "they already own. In this example (Figure 12), a user performs a workout routine while MobilePoser captures the session using the IMU data from the smartphone in the user's pocket. This not only allows the user to review their performance and track progress over time but also enables remote monitoring by fitness instructors or physical therapists. Moreover, MobilePoser's ability to track full-body movements facilitates interactive rehabilitation regimens [4] and other passive health sensing applications such as gait analysis [30] or hyperactivity detection [6], among others." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.543, + 0.24, + 0.556 + ], + "angle": 0, + "content": "7 OPEN SOURCE" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.561, + 0.483, + 0.645 + ], + "angle": 0, + "content": "To enable other researchers and practitioners to build upon our work, we release our pre-trained models, data pre-processing scripts, and model training code as open-source software at: https://github. com/SPICExLAB/MobilePoser. By making our work fully reproducible and extensible, we hope to accelerate research and development in the field of mobile motion capture using everyday devices." + }, + { + "type": "title", + "bbox": [ + 0.083, + 0.656, + 0.415, + 0.67 + ], + "angle": 0, + "content": "8 LIMITATIONS AND FUTURE WORK" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.675, + 0.483, + 0.827 + ], + "angle": 0, + "content": "While MobilePoser demonstrates promising results in estimating full-body pose and translation using minimal instrumentation, there are several limitations and opportunities for future work. First, as a purely inertial-based technique, MobilePoser's translation estimation is still susceptible to drift, particularly when devices deviate from their calibrated positions. This can occur when users wear loose clothing, causing the phone in the pocket to move around and resulting in orientation changes. To address this issue, future work could explore re-calibration techniques based on stationary poses or leverage additional sensory information, such as GPS, UWB or visual odometry, to correct for drift." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.827, + 0.483, + 0.897 + ], + "angle": 0, + "content": "Second, akin to prior wor, our evaluation has limitations of being tested on lab collected datasets. All the test datasets (DIP, TotalCapture, IMUPoser) were collected in lab settings due to the need for an accurate external ground truth motion capture system. 
Although we empirically demonstrate that MobilePoser works in real-world" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.108, + 0.912, + 0.135 + ], + "angle": 0, + "content": "settings (as seen in the accompanying video), we acknowledge the need for future datasets captured in-the-wild." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.135, + 0.915, + 0.273 + ], + "angle": 0, + "content": "Another limitation of MobilePoser, much like other prior works [14, 28, 48, 49], is the need for a calibration step. Currently, users first stand in a T-pose, which aligns the IMU data with the training data based on the SMPL kinematic model. While this calibration process is acceptable for some use cases, such as gaming, it may be less desirable for applications that demand seamless interactions, like indoor navigation. Future work could investigate more natural and unobtrusive calibration procedures, such as detecting common poses like standing with arms by the side using UWB, similar to SmartPoser [11]." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.274, + 0.913, + 0.33 + ], + "angle": 0, + "content": "In conclusion, while MobilePoser presents a significant step forward in enabling full-body pose and translation estimation using everyday devices, there remain several avenues for future research to extend the capabilities of this approach." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.343, + 0.665, + 0.356 + ], + "angle": 0, + "content": "9 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.361, + 0.915, + 0.542 + ], + "angle": 0, + "content": "In this paper, we present MobilePoser, a real-time, on-device system for estimating full-body pose and translation using IMUs in consumer mobile devices (phones, watches, earbuds). By leveraging a multi-stage approach that combines data-driven learning and physics-based optimization, MobilePoser achieves state-of-the-art accuracy while remaining lightweight and efficient. Our extensive evaluation on public datasets demonstrates clear improvements over prior work, both in terms of full-body pose estimation accuracy and enabling novel global translation estimation. Furthermore, we showcase the potential of MobilePoser through a series of proof-of-concept applications in gaming, fitness, and indoor navigation, highlighting its ability to enable new and immersive experiences using the devices people already own." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.555, + 0.714, + 0.568 + ], + "angle": 0, + "content": "ACKNOWLEDGMENTS" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.573, + 0.915, + 0.643 + ], + "angle": 0, + "content": "We thank Jianru Ding from the University of Chicago and Zeya Chen from the Institute of Design, Illinois Institute of Technology for helping film the video. Vasco Xu's and Henry Hoffmann's work on this project is supported by NSF (CCF-1823032 and CNS-1956180)." + }, + { + "type": "title", + "bbox": [ + 0.517, + 0.656, + 0.634, + 0.67 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.673, + 0.914, + 0.693 + ], + "angle": 0, + "content": "[1] [n. d]. PlayStation VR. https://www.playstation.com/en-us/explore/playstationvr/." + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.694, + 0.731, + 0.704 + ], + "angle": 0, + "content": "[2] 2023. HTC Vive. https://www.vive.com." + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.705, + 0.912, + 0.723 + ], + "angle": 0, + "content": "[3] Karan Ahuja. 2024. Practical and Rich User Digitization. 
arXiv:2403.00153 [cs.HC] https://arxiv.org/abs/2403.00153" + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.724, + 0.914, + 0.763 + ], + "angle": 0, + "content": "[4] Karan Ahuja, Sven Mayer, Mayank Goel, and Chris Harrison. 2021. Pose-on-the-go: Approximating user pose with smartphone sensor fusion and inverse kinematics. In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems. 1-12." + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.765, + 0.914, + 0.805 + ], + "angle": 0, + "content": "[5] Karan Ahuja, Vivian Shen, Cathy Mengying Fang, Nathan Riopelle, Andy Kong, and Chris Harrison. 2022. Controllerpose: inside-out body capture with VR controller cameras. In Proceedings of the 2022 CHI Conference on Human Factors in Computing Systems. 1-13." + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.806, + 0.914, + 0.855 + ], + "angle": 0, + "content": "[6] Riku Arakawa, Karan Ahuja, Kristie Mak, Gwendolyn Thompson, Sam Shaaban, Oliver Lindhiem, and Mayank Goel. 2023. LemurDx: Using Unconstrained Passive Sensing for an Objective Measurement of Hyperactivity in Children with no Parent Input. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 7, 2 (2023), 1-23." + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.855, + 0.914, + 0.895 + ], + "angle": 0, + "content": "[7] Riku Arakawa, Bing Zhou, Gurunandan Krishnan, Mayank Goel, and Shree K Nayar. 2023. MI-Poser: Human Body Pose Tracking Using Magnetic and Inertial Sensor Fusion with Metal Interference Mitigation. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 7, 3 (2023), 1-24." + }, + { + "type": "list", + "bbox": [ + 0.522, + 0.673, + 0.914, + 0.895 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.65, + 0.087 + ], + "angle": 0, + "content": "MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices" + }, + { + "type": "header", + "bbox": [ + 0.672, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "UIST '24, October 13-16, 2024, Pittsburgh, PA, USA" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.109, + 0.482, + 0.14 + ], + "angle": 0, + "content": "[8] Rayan Armani, Changlin Qian, Jiaxi Jiang, and Christian Holz. 2024. Ultra Inertial Poser: Scalable Motion Capture and Tracking from Sparse Inertial Sensors and Ultra-Wideband Ranging. In ACM SIGGRAPH 2024 Conference Papers. 1-11." + }, + { + "type": "ref_text", + "bbox": [ + 0.09, + 0.14, + 0.483, + 0.19 + ], + "angle": 0, + "content": "[9] Federica Bogo, Angjoo Kanazawa, Christoph Lassner, Peter Gehler, Javier Romero, and Michael J Black. 2016. Keep it SMPL: Automatic estimation of 3D human pose and shape from a single image. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part V 14. Springer, 561-578." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.19, + 0.483, + 0.23 + ], + "angle": 0, + "content": "[10] Nathan Devrio and Chris Harrison. 2022. discoBand: Multiview Depth-Sensing Smartwatch Strap for Hand, Body and Environment Tracking. In Proceedings of the 35th Annual ACM Symposium on User Interface Software and Technology. 1-13." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.23, + 0.482, + 0.271 + ], + "angle": 0, + "content": "[11] Nathan DeVrio, Vimal Mollyn, and Chris Harrison. 2023. 
SmartPoser: Arm Pose Estimation with a Smartphone and Smartwatch Using UWB and IMU Data. In Proceedings of the 36th Annual ACM Symposium on User Interface Software and Technology. 1-11." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.271, + 0.423, + 0.282 + ], + "angle": 0, + "content": "[12] Roy Featherstone. 2014. Rigid body dynamics algorithms. Springer." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.282, + 0.482, + 0.322 + ], + "angle": 0, + "content": "[13] Shubham Goel, Georgios Pavlakos, Jathushan Rajasegaran, Angjoo Kanazawa, and Jitendra Malik. 2023. Humans in 4d: Reconstructing and tracking humans with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision. 14783-14794." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.322, + 0.482, + 0.362 + ], + "angle": 0, + "content": "[14] Yinghao Huang, Manuel Kaufmann, Emre Aksan, Michael J Black, Otmar Hilliges, and Gerard Pons-Moll. 2018. Deep inertial pose: Learning to reconstruct human pose from sparse inertial measurements in real time. ACM Transactions on Graphics (TOG) 37, 6 (2018), 1-15." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.362, + 0.482, + 0.402 + ], + "angle": 0, + "content": "[15] Fan Jiang, Xubo Yang, and Lele Feng. 2016. Real-time full-body motion reconstruction and recognition for off-the-shelf VR devices. In Proceedings of the 15th ACM SIGGRAPH Conference on Virtual-Reality Continuum and Its Applications in Industry-Volume 1, 309–318." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.402, + 0.482, + 0.442 + ], + "angle": 0, + "content": "[16] Jiaxi Jiang, Paul Streli, Huajian Qiu, Andreas Fender, Larissa Laich, Patrick Snape, and Christian Holz. 2022. Avatarposer: Articulated full-body pose tracking from sparse motion sensing. In European Conference on Computer Vision. Springer, 443-460." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.442, + 0.482, + 0.483 + ], + "angle": 0, + "content": "[17] Yifeng Jiang, Yuting Ye, Deepak Gopinath, Jungdam Won, Alexander W Winkler, and C Karen Liu. 2022. Transformer Inertial Poser: Real-time human motion reconstruction from sparse IMUs with simultaneous terrain generation. In SIGGRAPH Asia 2022 Conference Papers. 1-9." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.483, + 0.482, + 0.523 + ], + "angle": 0, + "content": "[18] Haojian Jin, Zhijian Yang, Swarun Kumar, and Jason I Hong. 2018. Towards wearable everyday body-frame tracking using passive RFIDs. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 1, 4 (2018), 1-23." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.523, + 0.482, + 0.553 + ], + "angle": 0, + "content": "[19] Daehwa Kim and Chris Harrison. 2022. Etherpose: Continuous hand pose tracking with wrist-worn antenna impedance characteristic sensing. In Proceedings of the 35th Annual ACM Symposium on User Interface Software and Technology. 1-12." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.553, + 0.482, + 0.593 + ], + "angle": 0, + "content": "[20] David Kim, Otmar Hilliges, Shahram Izadi, Alex D Butler, Jiawen Chen, Jason Oikonomidis, and Patrick Olivier. 2012. Digits: freehand 3D interactions anywhere using a wrist-worn gloveless sensor. In Proceedings of the 25th annual ACM symposium on User interface software and technology. 167-176." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.593, + 0.482, + 0.613 + ], + "angle": 0, + "content": "[21] Diederik P Kingma and Jimmy Ba. 2014. 
Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.613, + 0.482, + 0.654 + ], + "angle": 0, + "content": "[22] Alexander Kyu, Hongyu Mao, Junyi Zhu, Mayank Goel, and Karan Ahuja. 2024. EITPose: Wearable and Practical Electrical Impedance Tomography for Continuous Hand Pose Estimation. In Proceedings of the CHI Conference on Human Factors in Computing Systems. 1-10." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.654, + 0.482, + 0.684 + ], + "angle": 0, + "content": "[23] Jiye Lee and Hanbyul Joo. 2024. Mocap Everyone Everywhere: Lightweight Motion Capture With Smartwatches and a Head-Mounted Camera. arXiv preprint arXiv:2401.00847 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.684, + 0.482, + 0.713 + ], + "angle": 0, + "content": "[24] Yilin Liu, Shijia Zhang, and Mahanth Gowda. 2021. NeuroPose: 3D hand pose tracking using EMG wearables. In Proceedings of the Web Conference 2021. 1471-1482." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.713, + 0.482, + 0.744 + ], + "angle": 0, + "content": "[25] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J. Black. 2015. SMPL: A Skinned Multi-Person Linear Model. ACM Trans. Graphics (Proc. SIGGRAPH Asia) 34, 6 (Oct. 2015), 248:1-248:16." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.744, + 0.482, + 0.784 + ], + "angle": 0, + "content": "[26] Naureen Mahmood, Nima Ghorbani, Nikolaus F Troje, Gerard Pons-Moll, and Michael J Black. 2019. AMASS: Archive of motion capture as surface shapes. In Proceedings of the IEEE/CVF international conference on computer vision. 5442-5451." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.784, + 0.338, + 0.795 + ], + "angle": 0, + "content": "[27] Microsoft Corporation. [n.d.]. Microsoft Kinect." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.795, + 0.482, + 0.835 + ], + "angle": 0, + "content": "[28] Vimal Mollyn, Riku Arakawa, Mayank Goel, Chris Harrison, and Karan Ahuja. 2023. IMUPoser: Full-Body Pose Estimation using IMUs in Phones, Watches, and Earbuds. In Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems. 1-12." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.835, + 0.416, + 0.845 + ], + "angle": 0, + "content": "[29] NaturalPoint, Inc. [n.d.]. OptiTrack. https://www.optitrack.com." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.845, + 0.482, + 0.885 + ], + "angle": 0, + "content": "[30] Shu Nishiguchi, Minoru Yamada, Koutatsu Nagai, Shuhei Mori, Yuu Kajiwara, Takuya Sonoda, Kazuya Yoshimura, Hiroyuki Yoshitomi, Hiromu Ito, Kazuya Okamoto, et al. 2012. Reliability and validity of gait analysis by android-based smartphone. Telemedicine and e-Health 18, 4 (2012), 292–296." + }, + { + "type": "list", + "bbox": [ + 0.087, + 0.109, + 0.483, + 0.885 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.109, + 0.914, + 0.13 + ], + "angle": 0, + "content": "[31] Northern Digital Inc. 2020. travSTAR. https://www.ndigital.com/msci/products/drivebay-trakstar." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.13, + 0.914, + 0.171 + ], + "angle": 0, + "content": "[32] Mathias Parger, Joerg H Mueller, Dieter Schmalstieg, and Markus Steinberger. 2018. Human upper-body inverse kinematics for increased embodiment in consumer-grade virtual reality. In Proceedings of the 24th ACM symposium on virtual reality software and technology. 1-10." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.171, + 0.897, + 0.181 + ], + "angle": 0, + "content": "[33] Polhemus. 2020. Polhemus Motion Capture System. https://polhemus.com/." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.181, + 0.757, + 0.191 + ], + "angle": 0, + "content": "[34] PolyCam. [n.d.]. PolyCam. https://poly.cam/." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.191, + 0.913, + 0.221 + ], + "angle": 0, + "content": "[35] Jose Luis Ponton, Haoran Yun, Andreas Aristidou, Carlos Andujar, and Nuria Pelechano. 2023. SparsePoser: Real-time Full-body Motion Reconstruction from Sparse Data. ACM Transactions on Graphics 43, 1 (2023), 1-14." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.221, + 0.913, + 0.251 + ], + "angle": 0, + "content": "[36] Jathushan Rajasegaran, Georgios Pavlakos, Angjoo Kanazawa, and Jitendra Malik. 2021. Tracking people with 3D representations. arXiv preprint arXiv:2111.07868 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.251, + 0.913, + 0.282 + ], + "angle": 0, + "content": "[37] Nirupam Roy, He Wang, and Romit Roy Choudhury. 2014. I am a smartphone and i can tell my user's walking direction. In Proceedings of the 12th annual international conference on Mobile systems, applications, and services. 329-342." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.282, + 0.913, + 0.312 + ], + "angle": 0, + "content": "[38] Takaki Shiratori, Hyun Soo Park, Leonid Sigal, Yaser Sheikh, and Jessica K Hodgins. 2011. Motion capture from body-mounted cameras. In ACM SIGGRAPH 2011 papers. 1-10." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.312, + 0.913, + 0.332 + ], + "angle": 0, + "content": "[39] Ivan E Sutherland. 1968. A head-mounted three dimensional display. In Proceedings of the December 9-11, 1968, fall joint computer conference, part I. 757-764." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.332, + 0.913, + 0.362 + ], + "angle": 0, + "content": "[40] Matthew Trumble, Andrew Gilbert, Charles Malleson, Adrian Hilton, and John Collomosse. 2017. Total capture: 3d human pose estimation fusing video and inertial sensors. In Proceedings of 28th British Machine Vision Conference. 1-13." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.362, + 0.853, + 0.372 + ], + "angle": 0, + "content": "[41] Vicon Motion Systems Ltd. [n.d.]. Vicon. https://www.vicon.com." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.372, + 0.913, + 0.402 + ], + "angle": 0, + "content": "[42] Daniel Vlasic, Rolf Adelsberger, Giovanni Vannucci, John Barnwell, Markus Gross, Wojciech Matusik, and Jovan Popovic. 2007. Practical motion capture in everyday surroundings. ACM transactions on graphics (TOG) 26, 3 (2007), 35-es." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.402, + 0.913, + 0.432 + ], + "angle": 0, + "content": "[43] Timo Von Marcard, Bodo Rosenhahn, Michael J Black, and Gerard Pons-Moll. 2017. Sparse inertial poser: Automatic 3d human pose estimation from sparse imus. In Computer graphics forum, Vol. 36. Wiley Online Library, 349-360." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.432, + 0.913, + 0.473 + ], + "angle": 0, + "content": "[44] Erwin Wu, Ye Yuan, Hui-Shyong Yeo, Aaron Quigley, Hideki Koike, and Kris M Kitani. 2020. Back-hand-posed: 3d hand pose estimation for a wrist-worn camera via dorsum deformation network. In Proceedings of the 33rd Annual ACM Symposium on User Interface Software and Technology. 1147–1160." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.473, + 0.913, + 0.492 + ], + "angle": 0, + "content": "[45] Xsens Technologies B.V. [n.d.]. Xsens IMU Systems. https://www.xsens.com. Accessed: 2024-03-07." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.492, + 0.913, + 0.522 + ], + "angle": 0, + "content": "[46] Hang Yan, Qi Shan, and Yasutaka Furukawa. 2018. RIDI: Robust IMU double integration. In Proceedings of the European conference on computer vision (ECCV), 621-636." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.522, + 0.913, + 0.563 + ], + "angle": 0, + "content": "[47] Xinyu Yi, Yuxiao Zhou, Marc Habermann, Vladislav Golyanik, Shaohua Pan, Christian Theobalt, and Feng Xu. 2023. EgoLocate: Real-time Motion Capture, Localization, and Mapping with Sparse Body-mounted Sensors. arXiv preprint arXiv:2305.01599 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.563, + 0.913, + 0.613 + ], + "angle": 0, + "content": "[48] Xinyu Yi, Yuxiao Zhou, Marc Habermann, Soshi Shimada, Vladislav Golyanik, Christian Theobalt, and Feng Xu. 2022. Physical inertial poser (pip): Physics-aware real-time human motion tracking from sparse inertial sensors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 13167-13178." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.613, + 0.913, + 0.643 + ], + "angle": 0, + "content": "[49] Xinyu Yi, Yuxiao Zhou, and Feng Xu. 2021. Transpose: Real-time 3d human translation and pose estimation with six inertial sensors. ACM Transactions on Graphics (TOG) 40, 4 (2021), 1-13." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.643, + 0.913, + 0.683 + ], + "angle": 0, + "content": "[50] Yang Zhang, Chouchang Yang, Scott E Hudson, Chris Harrison, and Alanson Sample. 2018. Wall++ room-scale interactive and context-aware sensing. In Proceedings of the 2018 chi conference on human factors in computing systems. 1-15." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.683, + 0.913, + 0.723 + ], + "angle": 0, + "content": "[51] Mingmin Zhao, Tianhong Li, Mohammad Abu Alsheikh, Yonglong Tian, Hang Zhao, Antonio Torralba, and Dina Katabi. 2018. Through-wall human pose estimation using radio signals. In Proceedings of the IEEE conference on computer vision and pattern recognition. 7356-7365." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.723, + 0.913, + 0.764 + ], + "angle": 0, + "content": "[52] Li'an Zhuo, Jian Cao, Qi Wang, Bang Zhang, and Liefeng Bo. 2023. Towards Stable Human Pose Estimation via Cross-View Fusion and Foot Stabilization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 650-659." 
+ }, + { + "type": "list", + "bbox": [ + 0.517, + 0.109, + 0.914, + 0.764 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12492/7b5fffe0-9106-451e-9d83-f8c53a7c05c0_origin.pdf b/data/2025/2504_12xxx/2504.12492/7b5fffe0-9106-451e-9d83-f8c53a7c05c0_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..fcc371ed076227d414432bc9f809aadba2026968 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/7b5fffe0-9106-451e-9d83-f8c53a7c05c0_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f976aae5f07560efa5958e2d0e4594d8151c57732ec98191e0c42c0987c1903d +size 13045023 diff --git a/data/2025/2504_12xxx/2504.12492/full.md b/data/2025/2504_12xxx/2504.12492/full.md new file mode 100644 index 0000000000000000000000000000000000000000..0e238414ac802feb649ecc3606a28351d49bea97 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/full.md @@ -0,0 +1,420 @@ +# MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices + +Vasco Xu + +University of Chicago + +Chicago, USA + +vascoxu@uchicago.edu + +Henry Hoffmann + +University of Chicago + +Chicago, USA + +hankhoffmann@cs.uchicago.edu + +Chenfeng Gao + +Northwestern University + +Evanston, USA + +chenfenggao2029@u.northwestern.edu + +Karan Ahuja + +Northwestern University + +Evanston, USA + +kahuja@northwestern.edu + +![](images/21cb0fed5824f2b52eaead0f3b91c03397de145b60f7fc3fc56c8e8b6dc9ec69.jpg) +Figure 1: MobilePoser uses any subset of consumer mobile devices (phones, watches, earbuds) available to estimate full-body pose and global translation. + +![](images/3c2eb974ddb24edaa9597fd4b66033fe86777991e12341a6a7db51af8002f3c2.jpg) + +# ABSTRACT + +There has been a continued trend towards minimizing instrumentation for full-body motion capture, going from specialized rooms and equipment, to arrays of worn sensors and recently sparse inertial pose capture methods. However, as these techniques migrate towards lower-fidelity IMUs on ubiquitous commodity devices, like phones, watches, and earbuds, challenges arise including compromised online performance, temporal consistency, and loss of global translation due to sensor noise and drift. Addressing these challenges, we introduce MobilePoser, a real-time system for full-body pose and global translation estimation using any available subset of IMUs already present in these consumer devices. MobilePoser employs a multi-stage deep neural network for kinematic pose estimation followed by a physics-based motion optimizer, achieving state-of-the-art accuracy while remaining lightweight. We conclude + +with a series of demonstrative applications to illustrate the unique potential of MobilePoser across a variety of fields, such as health and wellness, gaming, and indoor navigation to name a few. + +# CCS CONCEPTS + +- Human-centered computing $\rightarrow$ Ubiquitous and mobile computing. + +# KEYWORDS + +Motion capture, sensors, inertial measurement units, mobile devices + +# ACM Reference Format: + +Vasco Xu, Chenfeng Gao, Henry Hoffmann, and Karan Ahuja. 2024. MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices. In The 37th Annual ACM Symposium on User Interface Software and Technology (UIST '24), October 13–16, 2024, Pittsburgh, PA, USA. ACM, New York, NY, USA, 11 pages. 
https://doi.org/10.1145/3654777.3676461 + +# 1 INTRODUCTION + +Full-body motion capture has numerous applications in gaming, fitness, and virtual and augmented reality (VR/AR), enabling immersive experiences and context-aware interactions. While vision-based approaches for 3D human pose estimation have shown great + +![](images/696e3fe3b988e348657a5ae71dc830e60ea0483d72b1488924a444e669644195.jpg) +Figure 2: Real-time global pose estimation powered by MobilePoser: (A) Person with smartwatch (left wrist) waving their hands. (B) Person with smartwatch (left wrist) performing jumping jacks. (C) Person wearing a smartwatch (left wrist) and carrying a phone in their right pocket running. + +![](images/0756f6e551704363b0a80d0018307791429a28468cb1415b01872fdd090bcd17.jpg) + +![](images/245ac487888e82f4f86eed6cb209cc3d96f68466dd6718a8c94d350761353cc9.jpg) + +![](images/1b2a7df0cabf3fc0e25cb50f472835af79ab269436848ca26ae321703af8357f.jpg) + +![](images/3169657663aa0b2619685cc0f22df97a38dbc5e3520318143006da927e02c2d9.jpg) + +promise, they require subjects to be within the camera's field of view, limiting their practicability for mobile and on-the-go applications. In contrast, inertial measurement unit (IMU) based techniques offer an attractive alternative, enabling less intrusive and occlusion-free user digitization [3]. + +Commercial systems such as Xsens [45] use up to 17 special-purpose sensors to provide highly accurate pose estimations. However, such approaches are intrusive, making them undesirable for everyday use. Consequently, there has been a trend towards minimizing instrumentation. Sparse inertial pose capture methods, such as TransPose [49] and DIP [14], use 6 IMUs to achieve a balance between accuracy and practicality. Yet, these methods still require expensive and special-purpose IMUs attached to specific body joints. To enable full-body motion tracking without any external infrastructure, IMUPoser [28] leverages IMUs in devices we already carry around with us, namely smartphones, smartwatches, and earbuds. These commodity devices, however, use lower-fidelity IMUs, which compromises online performance, temporal consistency, and global translation estimation. + +In this work, we present MobilePoser, a real-time user digitization technique that tracks both poses and global movement (referred to as translation) using consumer devices (Figure 1) such as watches, phones and earbuds. To enable on-the-go motion tracking without any external infrastructure, we must address a set of unique challenges. First, the number of instrumented points is dynamically changing and sparse (at most three devices, with as few as one), making the problem highly under-constrained. Second, IMUs do not directly measure positional data, making global translation tracking non-trivial. Additionally, noise and drift from the low-cost IMUs found in commodity devices complicates pose and translation + +estimation. Finally, such a system should operate directly on-device for real-time use, anywhere, anytime. + +MobilePoser tackles these challenges by employing a multi-stage approach. For pose estimation, it utilizes a deep neural network (DNN) to predict full-body pose from the available IMU data, followed by a physics-based optimization step to ensure spatiotemporal consistency and plausible kinematics. This greatly helps resolve ambiguous instrumented joint motion profiles, such as differentiating between waving (Figure 2 A) versus jumping jacks (Figure 2 B) from only a single smartwatch on the wrist. 
To aid in generalizability, the model is trained on a large dataset of synthesized IMU measurements generated from high-quality motion capture (MoCap) data. For global translation estimation, MobilePoser employs a hybrid approach that fuses predictions from a foot contact-based method and a DNN-based method that directly regresses the root joint velocity. This combination enables accurate and robust translation estimation, even in challenging scenarios where both feet are in motion together (Figure 2 C). Importantly, MobilePoser is optimized to run on-device, achieving real-time performance of 60 frames per second on a smartphone (iPhone 15 Pro), making it suitable for mobile applications. + +In summary, MobilePoser makes the following key contributions: + +(1) It presents a novel framework for inertial translation estimation using consumer devices, enabling accurate tracking of global movement without specialized hardware. +(2) It achieves state-of-the-art full-body pose estimation across various on-body configurations of commodity IMU devices, demonstrating robust performance with as few as one and up to three wearable devices. +(3) It provides an open-source implementation that runs in real-time on edge devices, making it accessible and practical for widespread use. + +
| System | # Inst. Joints | FPS | Consumer Device | Translation | MPJVE (cm) | Jitter ($10^2 m/s^3$) |
| --- | --- | --- | --- | --- | --- | --- |
| Xsens [45] | 17 | 120 | × | ✓ | - | - |
| SIP [43] | 6 | 60 | × | ✓ | 7.7 | 3.8 |
| DIP [14] | 6 | 29 | × | × | 8.9 | 30.13 |
| TransPose [49] | 6 | 90 | × | ✓ | 7.1 | 1.4 |
| PIP [48] | 6 | 60 | × | ✓ | 5.9 | 0.24 |
| IMUPoser [28] | 1-3 | 25 | ✓ | × | 12.1 | 1.9 |
| MobilePoser (our work) | 1-3 | 60 | ✓ | ✓ | 10.6 | 0.97 |

+ +Table 1: Comparison with key prior work on the DIP-IMU dataset. + +# 2 RELATED WORK + +# 2.1 User Digitization with External Sensors + +Commercial motion capture systems such as OptiTrack [29] and Vicon [41] use specialized hardware, such as multiple calibrated high-speed infrared cameras, to track retroreflective markers attached to a user's body. Such setups are commonly used in games, movies and character animations that require millimeter accuracy and are the gold standard of motion capture. The expensive infrastructure required by commercial systems, makes them impractical for everyday use. Therefore, much research has been devoted to instrumentation-free approaches using monocular cameras. Such approaches generally rely on RGB [9, 13, 36] or depth [27] cameras based computer vision techniques to predict body pose. + +There also exists specialized external hardware for pose tracking in Extended Reality (XR). For example, the HTC Vive [2], PlayStation VR [1] and Oculus Rift [32] track the head, handheld controllers and other limb-borne accessories using external sensor base stations for Virtual Reality (VR) applications. The un-sensed joints are estimated with inverse kinematics [15] or learning-based methods [16, 35]. Other non-optical external approaches for pose estimation include capacitive sensing [50], magnetic fields [31, 33], RF [51], and mechanical linkages [39]. + +# 2.2 User Digitization with non-IMU Worn Sensors + +Wearable sensors provide a portable and flexible alternative to external sensors. For example, MI-Poser [7] uses magnetic tracking in wristbands and AR glasses to estimate upper-body poses. Other works have explored wrist-worn cameras [20, 44], EMG sensors [24], EIT sensors [22], wrist-worn antennas [19] and depth sensor armbands [10]. However, these works focus solely on capturing the motion of specific body parts (e.g., wrist or upper-body). + +To capture full-body motion, a popular approach is to use body-mounted cameras coupled with computer vision techniques [5, 38]. Other works have explored different sensor technologies such as ultrasonic sensors [42] and RFID [18]. Nevertheless, these works require users to wear sensors they do not already have. Pose-On-The-Go [4] addresses this by estimating full-body pose via extreme sensor fusion, leveraging a phone's front and rear cameras, thus requiring no special instrumentation. However, its computationally expensive and relies heavily on heuristics to power body poses, often resulting in unnatural motions. MobilePoser differentiates itself by focusing on full-body pose estimation using power-efficient + +IMUs already found in consumer devices, such as smartphones, smartwatches, and earbuds. + +# 2.3 User Digitization with IMU Worn Sensors + +Commercial motion capture systems, such as Xsens [45], use a large number of inertial sensors (typically 17) strapped to the body to provide high-quality motion capture. These setups consist of homogeneous, high-grade IMUs that are calibrated for noise and have known positions on the body, resulting in a less ill-posed problem compared to using sparse, heterogeneous sensors. However, such an approach is highly inconvenient and intrusive for everyday use. + +To address this limitation, researchers have explored reconstructing human motions from a reduced number of sensors. Works such as SIP [43], DIP [14], PIP [48], TIP [17], and TransPose [49] have demonstrated the feasibility of using only 6 commercial-grade Xsens IMU sensors for full-body motion capture. 
Works have further explored integrating other input modalities (e.g. UWB [8] and egocentric images [47]) in addition to the 6 IMUs for increased performance. All these approaches leverage the homogeneity and known calibrated positions of the sensors to achieve accurate pose estimation. However, even 6 sensors can be cumbersome for on-the-go applications, especially those that require passive sensing. + +Recent research has investigated even sparser IMU configurations using commodity devices. IMUPoser [28], which is most closely related to our work, performs pose estimation using any combination of smartphone, smartwatch, and earbuds. While IMUPoser tackles the challenges of heterogeneous sensor quality for pose estimation, it lacks global translation due to IMU noise and drift, and contains unrealistic spatio-temporal motion artifacts. Additionally, IMUPoser runs on a laptop at $25\mathrm{Hz}$ , limiting its practicality for real-time mobile applications. + +In contrast, MobilePoser addresses these limitations by demonstrating improved pose estimation accuracy on widely used benchmarks while also estimating global translation (see Table 1). Furthermore, our system is designed to run fully on-device, achieving real-time performance of 60 fps on edge mobile devices. This enables MobilePoser to provide a more practical and accessible solution for on-the-go motion capture using commodity devices. + +# 3 MOBILEPOSER + +Estimating a user's full-body pose from a sparse set of IMU observations is a severely under-constrained problem as it aims to infer a high-dimensional quantity, i.e., the full-body pose, from low-dimensional observations that only capture partial motion at each instrumented point. Moreover, multiple possible solutions could + +![](images/82d7e3785f82d268bc65de4155687f47ebd6e5f7505f1700e0b66ace8a9d59c2.jpg) +Figure 3: MobilePoser system overview. MobilePoser accepts any available subset of IMU data from the user and masks absent devices by setting their values to zero. The IMU data is then fed into two main modules: (1) Pose Estimation, which first estimates joint positions followed by joint rotations, and (2) Translation Estimation, which combines foot-ground contact probabilities with a direct neural network-based approach to regress global velocity. Finally, a Physics Optimizer refines the predicted joint rotations and global translation to ensure they satisfy physical constraints. + +explain the observed data, making it challenging to determine the correct pose. To tackle these challenges, we introduce MobilePoser, a system that leverages data-driven learning and physics-based optimization to estimate accurate and plausible full-body poses and global translations from sparse IMU inputs. Figure 3 provides an overview of our pipeline, which we describe in detail in the following sections. + +# 3.1 System Input + +MobilePoser takes as input acceleration and orientation readings from IMUs across any subset of three consumer devices: smartphones, smartwatches, and earbuds. Each of these devices can be placed at different body locations, resulting in various possible combinations. For instance, a smartphone can be stored in the left or right pocket, held in the left or right hand, placed next to the head during a call, or not carried by the user at all. Similarly, smartwatches can be worn on either wrist or not worn at all, while earbuds can be worn, placed in a charging case stored in either pocket, or not carried by the user. 
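
To make this "any available subset" interface concrete, the short sketch below models the device states described above. The class, the location labels, and the helper are illustrative assumptions, not code from the MobilePoser release:

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class DeviceState:
    """Where each device currently is; None means not carried/worn."""
    phone: Optional[str] = None    # e.g. "left_pocket", "right_pocket", "head"
    watch: Optional[str] = None    # "left_wrist" or "right_wrist"
    earbuds: Optional[str] = None  # "head", or a pocket when in their case

def active_slots(state: DeviceState) -> set:
    """The subset of body locations that currently produce IMU data."""
    return {loc for loc in (state.phone, state.watch, state.earbuds) if loc}

# Example: phone in the right pocket, watch on the left wrist, no earbuds.
print(active_slots(DeviceState(phone="right_pocket", watch="left_wrist")))
# e.g. {'right_pocket', 'left_wrist'}
```
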

Following IMUPoser [28], we consider 24 plausible device-location combinations across five body locations: right pocket, left pocket, right wrist, left wrist, and head. These combinations cover the various ways users might carry or wear their devices throughout the day. Regardless of the input device combination, our model expects IMU data from the five predefined body locations.

The IMU signal at each location consists of acceleration (3 values) and orientation (a $3 \times 3$ rotation matrix), resulting in a total of 12 IMU values per location. Across all five locations, this yields an input vector $x \in \mathbb{R}^{60}$. However, since at any given time only a subset of 1-3 devices may be present, data from absent devices is masked and set to zero. This masking approach allows us to build a unified model that seamlessly handles a varying number of available devices and their changing on-body locations. It further eliminates the need to train separate models for each possible combination, making the system more practical and efficient.

# 3.2 Full-Body Pose Estimation

To learn a mapping from IMU input to full-body pose, we employ a data-driven, multi-stage neural network approach. Specifically, our pose estimation network consists of two submodules: a joint predictor $(\mathcal{F}^{joint})$ and a rotation predictor $(\mathcal{F}^{\theta})$. $\mathcal{F}^{joint}$ estimates joint positions as an intermediate task, and $\mathcal{F}^{\theta}$ solves for the joint angle orientations. Both submodules use a bidirectional LSTM (bi-LSTM) to model both spatial and temporal information [14]. We input data into both submodules in a sliding-window fashion with window length $N$.

3.2.1 Joint Pose Estimation $(\mathcal{F}^{joint})$. This module estimates the joint positions from a sequence of IMU measurements. We explicitly estimate joint positions as an intermediate step, as this helps extract useful information from linear accelerations due to their linear correlation with joint positions [49]. The input to $\mathcal{F}^{joint}$ is $x^{imu}(t) = [x_{t-N}, \ldots, x_t]$, where $t$ is the current time step and $N$ is the time window length. The outputs are the root (pelvis) relative 3D positions of the 24 SMPL body joints [25], $\pmb{p}(t) = [\pmb{p}_{t-N}, \ldots, \pmb{p}_t] \in \mathbb{R}^{N \times 72}$. The loss function used to train this network is:

$$
\mathcal{L}_{joint} = \left\| \mathbf{p} - \mathbf{p}_{GT} \right\|_2^2 \tag{1}
$$

where the subscript $GT$ denotes the ground truth and $\mathbf{p}$ represents the full-body SMPL joint positions.

3.2.2 Joint Rotation and Body Mesh Estimation $(\mathcal{F}^{\theta})$. Here we employ a neural kinematic estimator to regress joint rotations from the previously estimated positions. We concatenate the joint coordinates from $\mathcal{F}^{joint}$ with the IMU measurements, which serves as the input to $\mathcal{F}^{\theta}$. Note that while the SMPL body encodes 24 joints, only 18 are relevant from a rotation prediction perspective, as the fingers, wrists and toes are independent of the on-body IMUs and are hence set to identity rotation matrices [49]. The outputs of the network are the 18 root-relative joint orientations represented as 6D rotations: $\pmb{\theta}(t) = [\pmb{\theta}_{t-N},\dots,\pmb{\theta}_t] \in \mathbb{R}^{N \times 108}$.

Our joint rotation loss consists of three terms: $\mathcal{L}_{ori}$, $\mathcal{L}_{pos}$, and $\mathcal{L}_{jerk}$.
The loss term $\mathcal{L}_{ori}$ is a standard L2 loss from the ground truth joint rotations. The term $\mathcal{L}_{pos}$ penalizes error accumulating along the kinematic chain. Finally, $\mathcal{L}_{jerk}$ promotes temporally smooth predictions, where $jerk(\theta) = \theta_t - 3\theta_{t-1} + 3\theta_{t-2} - \theta_{t-3}$ is a function that computes the jerk (third finite difference) of a signal $\theta$ at time step $t$, penalizing deviations between neighboring frames [49].

Our combined joint rotation loss function can be represented as:

$$
\mathcal{L}_{\theta} = \mathcal{L}_{ori} + \mathcal{L}_{pos} + \lambda \mathcal{L}_{jerk} \tag{2}
$$

$$
\mathcal{L}_{ori} = \left\| \theta - \theta_{GT} \right\|_2^2 \tag{3}
$$

$$
\mathcal{L}_{pos} = \left\| \mathrm{FK}(\theta) - \mathbf{p}_{GT} \right\|_2^2 \tag{4}
$$

$$
\mathcal{L}_{jerk} = \sum_{t}^{T} jerk(\theta) \tag{5}
$$

where $\mathrm{FK}(\cdot)$ is the forward kinematics function that computes joint coordinates from joint rotations. Given the joint rotations, the parametric SMPL body model generates a corresponding body mesh with 6890 vertices.

# 3.3 Global Translation Estimation

Translation estimation from IMUs is challenging, as they lack direct distance measurements. Moreover, IMUs are prone to noise and biases, which causes techniques such as double-integration of acceleration to rapidly accumulate errors [46]. Therefore, inspired by prior work [23, 48, 49], we estimate the per-frame velocity of the root joint using two submodules: a foot-ground contact based estimator $(v_{f})$ and a neural network based root velocity estimator $(v_{e})$. We fuse the outputs of the two submodules to obtain a final estimate of global translation.

3.3.1 Foot-Ground Contact based Root Velocity $(v_{f})$. Here we estimate the probability of each foot contacting the ground independently using a bi-LSTM network. The input to the model is the concatenated vector of joint positions and IMU measurements. The output of the network is the likelihood that each foot is contacting the ground, denoted as $c_{foot} = [c_{lfoot}, c_{rfoot}] \in \mathbb{R}^2$. The foot with the higher foot-ground contact probability is defined as the supporting foot, $s = \max \{c_{lfoot}, c_{rfoot}\}$. The root velocity, $v_{f}(t) \in \mathbb{R}^{3}$, is then computed as the coordinate difference of the supporting foot between consecutive frames. This approach helps capture natural body motions, as movement is significantly influenced by the supporting foot's dynamics [37]. For example, when walking, the body's movement is propelled forward and stabilized by the foot contacting the ground. The network is trained using a binary cross-entropy loss.

3.3.2 Neural Network based Root Velocity $(v_{e})$. While the supporting foot contact based method yields plausible human movement, it inherently fails when both feet are off the ground (e.g., when running or jumping). To accommodate such cases, we estimate the per-frame root velocity directly using a neural network. We again use the predicted joint coordinates and IMU measurements as input. Compared to the previous submodules, which use a bi-LSTM for prediction, this module uses a unidirectional LSTM due to its capacity to capture longer historical context. The output is the per-frame root velocity, denoted as $v_{e}(t) \in \mathbb{R}^{3}$. The network is trained using a cumulative L2 loss [49].

3.3.3 Module Fusion.
Both modules offer different trade-offs in terms of predicting translation. The supporting-foot method provides more realistic estimates by leveraging human kinematics, but fails when both feet are off the ground. On the other hand, directly estimating the root velocity is more general, but is highly prone to unnatural movements such as foot sliding [52]. To achieve the benefits of both, we adopt a heuristic-based fusion approach inspired by TransPose [49]. In summary, when the foot-ground contact probability $c$ is higher than an upper threshold $\bar{q}$, we are confident that a foot is contacting the ground and hence rely on $v_{f}$ for translation estimation. When the contact probability is below a lower threshold $\underline{q}$, we rely on $v_{e}$. For intermediate probabilities, we fuse both velocity estimates using a weighted sum to output the final global velocity estimate $v$:

$$
v = \frac{c - \bar{q}}{\underline{q} - \bar{q}} v_{e} + \frac{c - \underline{q}}{\bar{q} - \underline{q}} v_{f} \tag{6}
$$

Following previous work [49], we use $\underline{q} = 0.5$ and $\bar{q} = 0.9$.

# 3.4 Physics-Aware Refinement

Our pose and translation estimation networks output the user's global pose based on a history of IMU measurements. When trained on sufficiently large amounts of data, the full-body pose and global translation networks learn the human motion manifold and produce realistic poses. However, despite the best modeling efforts, the outputs may still contain inter-mesh penetration and temporal artifacts such as jitter, foot-floor penetration and foot skating. To address these issues, we add an off-the-shelf physics motion optimizer [48]. The physics optimizer uses two proportional-derivative (PD) controllers to compute the desired acceleration of the simulated character that best reproduces the estimated pose while satisfying physical constraints, such as the equation of motion [12]. The inputs to the physics optimizer are the estimated joint angles $\theta$, the foot-ground contact probabilities $c_{foot}$, and the neural network based root velocity $v_{e}$. The outputs are the optimized joint angles and global translation, with reduced jitter and foot-ground penetration (Figure 4). For a detailed overview of the physics optimizer, we refer readers to PIP [48].

![](images/a5fdf6041545d3658902b169cb999bb2423047dfed789e142ee272c001e87cc9.jpg)
Figure 4: Demonstration of the physics optimizer's ability to reduce foot-ground penetration.

# 3.5 Real-time Inference

We implement proof-of-concept applications in iOS, using an Apple iPhone 15 Pro, an Apple Watch Series 9 and Apple AirPods Pro. The iPhone, Apple Watch and AirPods sample IMU data at 60, 60 and $25\mathrm{Hz}$ respectively. For uniformity, we convert all the IMU data to $60\mathrm{Hz}$ by upsampling the AirPods.

We employ the active device selection strategy proposed by IMUPoser [28], wherein UWB and inertial data are used to track the active devices and their on-body locations. For initial prototyping, the Apple Watch and AirPods communicate over Bluetooth to the iPhone, which streams data to a MacBook Air 2022 via a socket. Post connection, a small calibration step is performed to align the IMU measurements with the training data, similar to prior work [14, 28, 49]. Following this setup, data is streamed to the laptop for pre-processing and inference, and then relayed to Unity applications for visualization.
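
To make the streaming setup concrete, here is a minimal sketch of the laptop-side receive loop, including the zero-masking of absent devices from Section 3.1. The wire format (newline-delimited JSON), port, field names, and window length are illustrative assumptions, not the actual protocol of our apps:

```python
import json
import socket

import numpy as np

# Five input slots from Section 3.1; absent devices remain zero-masked.
LOCATIONS = ["right_pocket", "left_pocket", "right_wrist", "left_wrist", "head"]
WINDOW = 60  # sliding-window length N in frames (illustrative value)

def pack_frame(packet: dict) -> np.ndarray:
    """Build the 60-dim masked input vector from one streamed frame.

    `packet` maps a body location to {"acc": [3 floats], "ori": [9 floats]}
    for each active device; locations without a device stay zero.
    """
    x = np.zeros((5, 12), dtype=np.float32)
    for i, loc in enumerate(LOCATIONS):
        if loc in packet:
            x[i, :3] = packet[loc]["acc"]  # linear acceleration
            x[i, 3:] = packet[loc]["ori"]  # flattened 3x3 rotation matrix
    return x.reshape(-1)

# Hypothetical receive loop: one JSON object per line over TCP.
server = socket.create_server(("0.0.0.0", 9000))
conn, _ = server.accept()
frames, buf = [], b""
while True:
    data = conn.recv(4096)
    if not data:
        break
    buf += data
    while b"\n" in buf:
        line, buf = buf.split(b"\n", 1)
        frames.append(pack_frame(json.loads(line)))
        frames = frames[-WINDOW:]
        if len(frames) == WINDOW:
            x_win = np.stack(frames)  # (N, 60) window for the pose networks
            # ... run joint/rotation/velocity inference, relay pose to Unity ...
```
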
+ +To further prototype an on-device edge model, we convert our trained PyTorch model into CoreML with mixed precision quantization and evaluate its performance. On an iPhone 15 Pro, our model incurs $\sim 14\mathrm{ms}$ model inference time running at $60\mathrm{Hz}$ , capped by input IMU sampling rate. + +# 4 DATA SYNTHESIS AND MODEL TRAINING + +Model training requires a large collection of synchronized IMU measurements and corresponding SMPL body poses. We leverage the AMASS [26] MoCap dataset, which provides an extensive collection of such data(~40 hours), including translation. + +# 4.1 Full-Body Pose Estimation + +Our models expect IMU measurements as input. We synthesize IMU data following the approach proposed in DIP [14]. In summary, we place virtual sensors on the corresponding SMPL mesh vertices (left and right wrists, left and right pockets, and the head) and obtain joint rotations via limb orientations, while acceleration values are computed using finite differences. During training, we scale down the acceleration by a factor of $30m / s^2$ , such that its values are on a similar scale to orientations, for better learning. Of note, we do not normalize our IMU measurements to a root joint (e.g., the pelvis), as the number of available devices can vary. + +# 4.2 Global Translation Estimation + +The translation estimation networks require (1) binary labels for foot-ground contact states and (2) per-frame root velocity values. To generate foot-ground contact states, we assume that a foot in contact with the ground displays very little movement between frames. Therefore, when the movement of one foot between consecutive frames is less than a threshold $u$ , then we consider it to be contacting the ground. We set $u = 0.008$ , following previous work [49]. To train $v_{e}$ , we require per-frame root velocities. Since the AMASS dataset provides root position data, we can compute root velocities as the coordinate difference of the root position between consecutive frames. + +# 4.3 Training Setup and Procedure + +We train our models on a NVIDIA A40 GPU, which takes roughly a day for all modules and device-combinations. In total, our model has + +$\sim 6.7M$ trainable parameters. Each module is trained separately using a batch size of 256 and the Adam optimizer [21] with a learning rate of $\mathrm{lr} = 10^{-3}$ for 80 epochs. We also apply a gradient clipping with norm of 1, to prevent the gradients from exploding. + +During training of $\mathcal{F}^{\theta}$ , $v_{e}$ , and $v_{f}$ , we add Gaussian noise with $\sigma = 0.04$ to the joint positions to prevent overfitting and deal with prediction errors from $\mathcal{F}^{joint}$ . We empirically set $\lambda = 10^{-5}$ when training $\mathcal{F}^{\theta}$ , to encourage temporally smooth predictions. + +# 5 EVALUATION + +We systematically isolate and analyze the efficacy of MobilePoser across different datasets, evaluation metrics and protocols. We show both qualitative and quantitative results, and also run ablation studies to evaluate our translation estimation design choices. + +# 5.1 Datasets + +We evaluate MobilePoser on three real-world, inertial datasets, summarized in Table 2: + +- DIP-IMU [14] contains data from 10 participants, collected using commercial-grade Xsens [45] IMUs at $60\mathrm{Hz}$ . It includes a rich variety of activities such as arm raises, stretches, lunges, squats, and punches. However, DIP-IMU does not contain global translation data. 
+- TotalCapture [40] provides real IMU measurements with ground-truth pose and translation, captured using commercial Xsens IMUs at $60\mathrm{Hz}$ . Following PIP [48], we re-calibrate the acceleration measurements to account for constant bias. +- IMUPoser [28] is collected from 10 participants using consumer-grade devices: an iPhone 11 Pro, Apple Watch Series 6, and AirPods, at $25\mathrm{Hz}$ . It provides ground-truth pose and global translation data. + +# 5.2 Full-Body Pose Estimation + +5.2.1 Evaluation Metrics. Like prior work, we use the following evaluation metrics for pose estimation (lower is better for all): + +- Mean Per Joint Rotation Error (MPJRE): Measure of mean angular error across all root aligned joints in degrees $(^{\circ})$ . +- Mean Per Joint Position Error (MPJPE): Measure of mean Euclidean distance error across all root aligned joints in centimeters (cm). +- Mean Per Joint Vertex Error (MPJVE): Measure of mean Euclidean distance error across all root aligned vertices of the SMPL body mesh in centimeters (cm). +- Mean Per Joint Jitter (Jitter): Measure of mean jerk across all body joints of the predicted motion in $m / s^3$ . + +We use MPJVE as our primary metric of evaluation for ease of comparison with prior work [28]. + +
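
Concretely, these metrics can be computed as in the NumPy sketch below; the array shapes and the fps argument are our assumptions, and MPJVE is the same computation as MPJPE applied to the 6890 root-aligned vertices of the SMPL mesh rather than the joints:

```python
import numpy as np

def mpjpe_cm(pred, gt):
    """Mean per-joint position error in cm; pred, gt: (T, J, 3) in meters."""
    return np.linalg.norm(pred - gt, axis=-1).mean() * 100.0

def mpjre_deg(pred_R, gt_R):
    """Mean per-joint geodesic rotation error in degrees; inputs: (T, J, 3, 3)."""
    rel = np.einsum("tjik,tjlk->tjil", pred_R, gt_R)  # pred @ gt^T per joint
    trace = np.einsum("tjii->tj", rel)
    angles = np.arccos(np.clip((trace - 1.0) / 2.0, -1.0, 1.0))
    return np.degrees(angles).mean()

def jitter(pred, fps=60):
    """Mean jerk magnitude (third finite difference) in m/s^3; pred: (T, J, 3)."""
    jerk = np.diff(pred, n=3, axis=0) * fps**3
    return np.linalg.norm(jerk, axis=-1).mean()
```

Note that Table 1 reports jitter scaled to $10^2 m/s^3$.
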
| Dataset | Capture Device | Translation | Data FPS |
| --- | --- | --- | --- |
| DIP-IMU | Commercial | × | 60 Hz |
| TotalCapture | Commercial | ✓ | 60 Hz |
| IMUPoser | Consumer | ✓ | 25 Hz |

+ +Table 2: Real-world IMU datasets for MobilePoser Evaluation. + +![](images/ede03b447a6f857c0266e6eff4f01b390fd362031a467d8cdaa8a83ca3d76bf3.jpg) +Figure 5: Comparison of MobilePoser's Full-Body Pose Estimation Error across different Evaluation Protocols on the DIP-IMU, IMUPoser and TotalCapture dataset respectively. + +5.2.2 Evaluation Protocol. We outline three evaluation protocols for training and fine-tuning to evaluate MobilePoser's efficacy across different data sources and noise profiles. + +- Base Model: We train our model on the synthetic data generated on the AMASS dataset. +- Finetune DIP-IMU: Like prior work, we train on AMASS and then fine-tune on 8 DIP-IMU participants. The 2 holdout participants are used for testing the Finetune DIP-IMU model on the DIP-IMU dataset. +- Finetune IMUPoser: We train on AMASS and fine-tune on the first 8 IMUPoser participants. The 2 holdout participants are used for testing the Finetune IMUPoser model on the IMUPoser dataset. + +5.2.3 Accuracy across Datasets. Figure 5 shows our full-body pose estimation accuracy for all three protocols across the three datasets listed in Section 5.1. Averaged across all three datasets, the MPJVE for the Base Model, Finetune DIP-IMU and Finetune IMUPoser protocols are 11.89, 11.73 and $11.33\mathrm{cm}$ respectively. It is interesting to note that the addition of commercial-grade IMU data (Finetune DIP-IMU) only improves accuracy by $1.3\%$ over the base model, while the addition of noisy IMU data from consumer devices (Finetune IMUPoser) results in a bigger improvement of $4.7\%$ . + +5.2.4 Accuracy across Activities. We further analyze results on different activities on the IMUPoser dataset, as it provides activity label meta-data. MobilePoser's accuracy generalizes across most everyday activity contexts: the error (MPJVE) for locomotion is 8.2 cm (walking 7.6 cm, jogging 8.8 cm), exercises is 10 cm (kicking: 7.5 cm, jumping jacks: 11.1 cm, boxing: 11.5 cm), sitting is 11.5 cm and freestyle motions such as tennis and basketball are 9.1 cm and 11.7 cm respectively. The accuracy degrades for postures with the user lying/facing down, e.g. push-ups have higher error of 16.1 cm. +5.2.5 Comparison with prior work. To aid in direct comparison with prior work [14, 28, 48, 49], we now make use of the Finetune DIP-IMU evaluation protocol, that is training a base model on the synthetic IMU data from AMASS and fine-tuning it on the 8 participants from DIP-IMU dataset. Tables 1 and 3 offer a quantitative + +
| System | # Inst. Joints | MPJRE | MPJVE | Jitter |
| --- | --- | --- | --- | --- |
| DIP | 6 | 17.2° | 11.2 | 3.62 |
| TransPose | 6 | 12.8° | 7.4 | 0.95 |
| PIP | 6 | 12.1° | 6.5 | 0.20 |
| IMUPoser | 1-3 | 25.6° | 15.4 | 1.30 |
| MobilePoser | 1-3 | 23.7° | 12.6 | 0.55 |

Table 3: Comparison with key prior work on the TotalCapture dataset.
comparison against key prior work, evaluated on the DIP-IMU and TotalCapture datasets, respectively. Given that our system targets a very sparse configuration of IMUs (1-3), it is unsurprising that we perform worse than systems utilizing 6 IMUs strategically placed around the body. On the DIP-IMU and TotalCapture datasets, compared to IMUPoser, which considers the same device-location combinations, we perform significantly better, displaying a $12.4\%$ and $18.2\%$ decrease in vertex error, respectively.

On the IMUPoser dataset, Figure 7 (A) provides a detailed breakdown of accuracy for different on-body device locations. Averaging across the 1-, 2-, and 3-device conditions, MobilePoser outperforms IMUPoser by $24.1\%$, $14.2\%$, and $8.7\%$, respectively. Furthermore, Figure 7 (B) provides an accuracy breakdown for the instrumented and non-instrumented joints in comparison with IMUPoser. If a limb has an IMU placed on any part, we consider all the joints pertaining to it as instrumented joints, while the rest are marked as non-instrumented. MobilePoser is $18.1\%$ and $17.4\%$ better than IMUPoser for predicting instrumented and non-instrumented joints, respectively. This can be seen in Figure 6, which depicts a visual comparison of our pose estimation with IMUPoser.

![](images/41dd9e935a6e838257beb11b887e0433dc3580897df48f9c7643911446cbaaf3.jpg)
Figure 6: Qualitative comparisons between our method and IMUPoser on the DIP-IMU and IMUPoser datasets.

![](images/24a6ce53daf981e05ba77dd84c8966fdf8230686765f953522e471c00433664e.jpg)
Figure 7: MPJVE comparison between IMUPoser and MobilePoser (our system) on the IMUPoser dataset for: (A) different on-body device combinations; (B) instrumented vs. non-instrumented joints.

# 5.3 Global Translation Estimation

5.3.1 Evaluation Protocol. We evaluate our Global Translation Estimation module on the TotalCapture and IMUPoser datasets, as DIP-IMU lacks translation data. Like prior work [48, 49], we use the Finetune DIP-IMU protocol (Section 5.2.2), that is, we train on AMASS and fine-tune on 8 participants of DIP-IMU, and we track the Root Translation Error (the Euclidean norm of the cumulative distance errors within 1 second; a sketch of this metric follows at the end of this subsection).

5.3.2 Accuracy across Datasets and Body Regions. On the TotalCapture and IMUPoser datasets, our mean root translation error across all device combinations is 27.55 and $17.63\mathrm{cm}$, respectively. Interestingly, for both datasets, we observe only a slight decrease in error when increasing the number of devices from one to two $(6.1\%)$ and no significant improvement $(4.0\%)$ when increasing from two to three. Analyzing the error across different body regions for the single-device scenario (Figure 8 A), we see that a device in the pocket has a much lower error $(14.8\mathrm{cm})$ compared to one on the wrist $(25.7\mathrm{cm})$ or the head $(29.7\mathrm{cm})$. This can be attributed to the legs capturing most of the locomotion during translation, resulting in marginal gains from sensors on the upper body. Figure 8 (B) shows the cumulative distance error over time.

![](images/36e1570002c8570b361bdc0aa62d67edbc98118850cfb1dbe5dae9fc5bd7f18b.jpg)
Figure 8: (A) Comparison of cumulative translation error for different instrumented joints on the IMUPoser and TotalCapture datasets. (B) Evaluation of cumulative distance errors with respect to time.
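As a concrete reading of the Root Translation Error, the sketch below computes it from predicted and ground-truth root trajectories in NumPy. The paper defines the metric only verbally; the disjoint 1-second windowing and the function name are our assumptions for illustration.

```python
import numpy as np

def root_translation_error_cm(pred_root, gt_root, fps=60):
    """Euclidean norm of the cumulative root-displacement error over
    1-second windows, averaged over windows and reported in cm.
    pred_root, gt_root: (T, 3) root trajectories in meters."""
    pred_step = np.diff(pred_root, axis=0)  # per-frame displacements
    gt_step = np.diff(gt_root, axis=0)
    errors = []
    for s in range(0, len(pred_step) - fps + 1, fps):
        # Accumulate one second of displacement, then compare trajectories.
        gap = pred_step[s:s + fps].sum(axis=0) - gt_step[s:s + fps].sum(axis=0)
        errors.append(np.linalg.norm(gap))
    return 100.0 * float(np.mean(errors))
```

5.3.3 Ablation Study.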
We perform ablation studies to understand the impact of key components in our system and their effects on performance. At the core of our system lies a subtle yet powerful concept: higher-order digitization (e.g., body pose) improves lower-order digitizations (e.g., steps). To quantify this idea, we run an ablation study of our translation estimation technique using both IMU data and the corresponding full-body pose inferred from it, versus using only IMU data. Figure 9 summarizes our results. Our IMU-only direct regression has an error of $21.4\mathrm{cm}$ across both datasets, while our integrated (IMU + IMU-inferred pose) approach decreases error by $29.4\%$ to $15.1~\mathrm{cm}$.

![](images/7b97abca80b72e72c22ffcf61d1df2388fd2a3e720e8c40f16839719378dbc45.jpg)
Figure 9: Benefits of using high-order digitization (i.e., IMU-inferred poses) for estimating global translation.

![](images/9a40e3573eaab617538725d13e6c909e5c242d39f4c53f843dea1f2131d3bbad.jpg)
Figure 10: Example indoor navigation application where MobilePoser digitizes multiple users within an office space.

![](images/468b8d082bb0ee2c8c8e2bf1858a5b7eddc0b0a61d02d612a1e9d13acd5a000c.jpg)

Building on the multi-stage architecture, we further evaluate the impact of two additional components: the jerk loss and the physics refinement. These elements were designed to enhance motion smoothness and physical plausibility. For the IMUPoser dataset, the jerk loss reduces jitter by $23.9\%$ and translation error by $3.33\%$, but increases mean pose error by $0.05\%$. Further, the physics-aware refinement reduces jitter by $29.7\%$ and translation error by $0.4\%$, but increases the mean pose error by $0.7\%$. The negligible increase in mean pose error is expected, as the refinement may occasionally over-smooth the motion; this phenomenon is also seen in PIP [48]. We believe that the significant improvements in jitter and translation far outweigh the minimal increase in pose error, resulting in more realistic motion. (A minimal sketch of the jerk loss follows at the end of this section.)

5.3.4 Comparison with prior work. To the best of our knowledge, no other works have explored both full-body pose and translation from such a sparse set of commodity IMUs. IMUPoser [28], which also targets consumer devices, does not estimate global translation. On the TotalCapture dataset, TransPose (6 IMUs) has a translation error of $12.8\mathrm{cm}$, while that of MobilePoser is $19.9\mathrm{cm}$ when a single IMU device is placed in the pocket. Unsurprisingly, a commercial-grade, 6-IMU system has higher accuracy due to its waist- and knee-mounted sensors, which capture larger ranges of locomotion compared to devices carried in the pocket.
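The paper describes the jerk loss only at this level of detail; the sketch below shows one standard formulation, a finite-difference jerk penalty on predicted joint positions, in PyTorch. The weighting term `lambda_jerk` and the frame rate are our assumptions, not values from the paper.

```python
import torch

def jerk_loss(pred_joints, fps=30.0):
    """Smoothness penalty: mean norm of the third time derivative
    (jerk) of predicted joint positions, pred_joints: (T, J, 3)."""
    vel = (pred_joints[1:] - pred_joints[:-1]) * fps   # (T-1, J, 3) m/s
    acc = (vel[1:] - vel[:-1]) * fps                   # (T-2, J, 3) m/s^2
    jerk = (acc[1:] - acc[:-1]) * fps                  # (T-3, J, 3) m/s^3
    return jerk.norm(dim=-1).mean()

# Hypothetical use in a training step, with loss_pose the primary pose
# reconstruction loss and lambda_jerk a small weight:
# loss = loss_pose + lambda_jerk * jerk_loss(pred_joints)
```

# 6 EXAMPLE USES

MobilePoser enables full-body pose estimation with global motion tracking using devices that users already own, opening up a wide range of novel applications. This section showcases three proof-of-concept applications in indoor navigation, gaming, and healthcare to illustrate MobilePoser's unique capabilities and potential impact.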
# 6.1 Indoor Localization and Navigation

To demonstrate MobilePoser's potential in this domain, we scan an office space using the PolyCam [34] LiDAR scanner app with an Apple iPhone 15 Pro. As shown in Figure 10, multiple users walk through the virtual office space, with their interactions and movements seamlessly digitized and represented in real time. Here, one user has a phone in their pocket and a watch on their wrist, while the other two only have a phone in their pocket. By leveraging the IMUs in these consumer devices, MobilePoser enables accurate indoor navigation and localization without the need for additional infrastructure or specialized hardware. This opens up exciting possibilities for applications such as indoor wayfinding, context-aware virtual assistants, and immersive virtual tours.

# 6.2 Mobile Gaming Experiences

![](images/8bf1542bdaa768fc5e2a80f6613166a3fb2954882e1c0b9e4d765ccec6f2611a.jpg)

![](images/9ed4af4fa541a6111eb81d829e2a5dee85f3efe408f46523f63cfff3248702bf.jpg)

![](images/2af18b4f56ea893a7d4db8d091c0a22c68ebfee9c664430ea05299bdf5efa3c4.jpg)

![](images/b463e6ba1e085e714896d2c0eb817f52b2064dd728a766208e5c2a436692c40d.jpg)
Figure 11: In this table tennis game, users can move around the table freely and use their wrist-instrumented hand to control their racket.

To showcase this potential, we developed a virtual table tennis game (Figure 11) that allows users to play remotely with others, similar to how Nintendo games are played in front of a TV. Each player has a phone in their pocket and a watch on the dominant (left) hand, which controls the racket. Players can freely move within their local space to control their avatars, adding a new level of physical interaction to the gaming experience. MobilePoser's ability to track full-body movements using everyday devices eliminates the need for specialized controllers, making immersive gaming experiences more accessible to a wider audience.

# 6.3 Fitness and Wellness

MobilePoser has the potential to revolutionize fitness tracking and rehabilitation by providing accurate, real-time feedback on a user's movements and poses without the need for external sensors or camera setups. This enables users to monitor their exercise form, track progress, and receive personalized guidance using the devices they already own.

![](images/d8de858657724951663c9cad16d40d80050174bb0830339947de87e3c6a36abc.jpg)
Figure 12: MobilePoser's full-body pose and locomotion can be used to automatically detect and count exercise repetitions, better estimate calories, and monitor form.

In this example (Figure 12), a user performs a workout routine while MobilePoser captures the session using the IMU data from the smartphone in the user's pocket. This not only allows the user to review their performance and track progress over time but also enables remote monitoring by fitness instructors or physical therapists. Moreover, MobilePoser's ability to track full-body movements facilitates interactive rehabilitation regimens [4] and other passive health sensing applications such as gait analysis [30] or hyperactivity detection [6], among others.

# 7 OPEN SOURCE

To enable other researchers and practitioners to build upon our work, we release our pre-trained models, data pre-processing scripts, and model training code as open-source software at: https://github.com/SPICExLAB/MobilePoser.
By making our work fully reproducible and extensible, we hope to accelerate research and development in the field of mobile motion capture using everyday devices.

# 8 LIMITATIONS AND FUTURE WORK

While MobilePoser demonstrates promising results in estimating full-body pose and translation using minimal instrumentation, there are several limitations and opportunities for future work. First, as a purely inertial technique, MobilePoser's translation estimation is still susceptible to drift, particularly when devices deviate from their calibrated positions. This can occur when users wear loose clothing, causing the phone in the pocket to move around and resulting in orientation changes. To address this issue, future work could explore re-calibration techniques based on stationary poses or leverage additional sensory information, such as GPS, UWB, or visual odometry, to correct for drift.

Second, akin to prior work, our evaluation is limited by being conducted on lab-collected datasets. All the test datasets (DIP, TotalCapture, IMUPoser) were collected in lab settings due to the need for an accurate external ground-truth motion capture system. Although we empirically demonstrate that MobilePoser works in real-world settings (as seen in the accompanying video), we acknowledge the need for future datasets captured in the wild.

Another limitation of MobilePoser, much like other prior works [14, 28, 48, 49], is the need for a calibration step. Currently, users first stand in a T-pose, which aligns the IMU data with the training data based on the SMPL kinematic model. While this calibration process is acceptable for some use cases, such as gaming, it may be less desirable for applications that demand seamless interactions, like indoor navigation. Future work could investigate more natural and unobtrusive calibration procedures, such as detecting common poses like standing with arms by the side using UWB, similar to SmartPoser [11].

In conclusion, while MobilePoser presents a significant step forward in enabling full-body pose and translation estimation using everyday devices, there remain several avenues for future research to extend the capabilities of this approach.

# 9 CONCLUSION

In this paper, we present MobilePoser, a real-time, on-device system for estimating full-body pose and translation using IMUs in consumer mobile devices (phones, watches, earbuds). By leveraging a multi-stage approach that combines data-driven learning and physics-based optimization, MobilePoser achieves state-of-the-art accuracy while remaining lightweight and efficient. Our extensive evaluation on public datasets demonstrates clear improvements over prior work, both in terms of full-body pose estimation accuracy and in enabling novel global translation estimation. Furthermore, we showcase the potential of MobilePoser through a series of proof-of-concept applications in gaming, fitness, and indoor navigation, highlighting its ability to enable new and immersive experiences using the devices people already own.

# ACKNOWLEDGMENTS

We thank Jianru Ding from the University of Chicago and Zeya Chen from the Institute of Design, Illinois Institute of Technology for helping film the video. Vasco Xu's and Henry Hoffmann's work on this project is supported by NSF (CCF-1823032 and CNS-1956180).

# REFERENCES

[1] [n. d.]. PlayStation VR. https://www.playstation.com/en-us/explore/playstationvr/.
[2] 2023. HTC Vive. https://www.vive.com.
[3] Karan Ahuja. 2024.
Practical and Rich User Digitization. arXiv:2403.00153 [cs.HC] https://arxiv.org/abs/2403.00153
[4] Karan Ahuja, Sven Mayer, Mayank Goel, and Chris Harrison. 2021. Pose-on-the-go: Approximating user pose with smartphone sensor fusion and inverse kinematics. In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems. 1-12.
[5] Karan Ahuja, Vivian Shen, Cathy Mengying Fang, Nathan Riopelle, Andy Kong, and Chris Harrison. 2022. ControllerPose: Inside-out body capture with VR controller cameras. In Proceedings of the 2022 CHI Conference on Human Factors in Computing Systems. 1-13.
[6] Riku Arakawa, Karan Ahuja, Kristie Mak, Gwendolyn Thompson, Sam Shaaban, Oliver Lindhiem, and Mayank Goel. 2023. LemurDx: Using Unconstrained Passive Sensing for an Objective Measurement of Hyperactivity in Children with no Parent Input. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 7, 2 (2023), 1-23.
[7] Riku Arakawa, Bing Zhou, Gurunandan Krishnan, Mayank Goel, and Shree K Nayar. 2023. MI-Poser: Human Body Pose Tracking Using Magnetic and Inertial Sensor Fusion with Metal Interference Mitigation. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 7, 3 (2023), 1-24.
[8] Rayan Armani, Changlin Qian, Jiaxi Jiang, and Christian Holz. 2024. Ultra Inertial Poser: Scalable Motion Capture and Tracking from Sparse Inertial Sensors and Ultra-Wideband Ranging. In ACM SIGGRAPH 2024 Conference Papers. 1-11.
[9] Federica Bogo, Angjoo Kanazawa, Christoph Lassner, Peter Gehler, Javier Romero, and Michael J Black. 2016. Keep it SMPL: Automatic estimation of 3D human pose and shape from a single image. In Computer Vision - ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part V. Springer, 561-578.
[10] Nathan DeVrio and Chris Harrison. 2022. discoBand: Multiview Depth-Sensing Smartwatch Strap for Hand, Body and Environment Tracking. In Proceedings of the 35th Annual ACM Symposium on User Interface Software and Technology. 1-13.
[11] Nathan DeVrio, Vimal Mollyn, and Chris Harrison. 2023. SmartPoser: Arm Pose Estimation with a Smartphone and Smartwatch Using UWB and IMU Data. In Proceedings of the 36th Annual ACM Symposium on User Interface Software and Technology. 1-11.
[12] Roy Featherstone. 2014. Rigid Body Dynamics Algorithms. Springer.
[13] Shubham Goel, Georgios Pavlakos, Jathushan Rajasegaran, Angjoo Kanazawa, and Jitendra Malik. 2023. Humans in 4D: Reconstructing and tracking humans with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision. 14783-14794.
[14] Yinghao Huang, Manuel Kaufmann, Emre Aksan, Michael J Black, Otmar Hilliges, and Gerard Pons-Moll. 2018. Deep Inertial Poser: Learning to reconstruct human pose from sparse inertial measurements in real time. ACM Transactions on Graphics (TOG) 37, 6 (2018), 1-15.
[15] Fan Jiang, Xubo Yang, and Lele Feng. 2016. Real-time full-body motion reconstruction and recognition for off-the-shelf VR devices. In Proceedings of the 15th ACM SIGGRAPH Conference on Virtual-Reality Continuum and Its Applications in Industry, Volume 1. 309-318.
[16] Jiaxi Jiang, Paul Streli, Huajian Qiu, Andreas Fender, Larissa Laich, Patrick Snape, and Christian Holz. 2022. AvatarPoser: Articulated full-body pose tracking from sparse motion sensing. In European Conference on Computer Vision. Springer, 443-460.
[17] Yifeng Jiang, Yuting Ye, Deepak Gopinath, Jungdam Won, Alexander W Winkler, and C Karen Liu. 2022.
Transformer Inertial Poser: Real-time human motion reconstruction from sparse IMUs with simultaneous terrain generation. In SIGGRAPH Asia 2022 Conference Papers. 1-9.
[18] Haojian Jin, Zhijian Yang, Swarun Kumar, and Jason I Hong. 2018. Towards wearable everyday body-frame tracking using passive RFIDs. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 1, 4 (2018), 1-23.
[19] Daehwa Kim and Chris Harrison. 2022. EtherPose: Continuous hand pose tracking with wrist-worn antenna impedance characteristic sensing. In Proceedings of the 35th Annual ACM Symposium on User Interface Software and Technology. 1-12.
[20] David Kim, Otmar Hilliges, Shahram Izadi, Alex D Butler, Jiawen Chen, Iason Oikonomidis, and Patrick Olivier. 2012. Digits: Freehand 3D interactions anywhere using a wrist-worn gloveless sensor. In Proceedings of the 25th Annual ACM Symposium on User Interface Software and Technology. 167-176.
[21] Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014).
[22] Alexander Kyu, Hongyu Mao, Junyi Zhu, Mayank Goel, and Karan Ahuja. 2024. EITPose: Wearable and Practical Electrical Impedance Tomography for Continuous Hand Pose Estimation. In Proceedings of the CHI Conference on Human Factors in Computing Systems. 1-10.
[23] Jiye Lee and Hanbyul Joo. 2024. Mocap Everyone Everywhere: Lightweight Motion Capture With Smartwatches and a Head-Mounted Camera. arXiv preprint arXiv:2401.00847 (2024).
[24] Yilin Liu, Shijia Zhang, and Mahanth Gowda. 2021. NeuroPose: 3D hand pose tracking using EMG wearables. In Proceedings of the Web Conference 2021. 1471-1482.
[25] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J. Black. 2015. SMPL: A Skinned Multi-Person Linear Model. ACM Trans. Graphics (Proc. SIGGRAPH Asia) 34, 6 (Oct. 2015), 248:1-248:16.
[26] Naureen Mahmood, Nima Ghorbani, Nikolaus F Troje, Gerard Pons-Moll, and Michael J Black. 2019. AMASS: Archive of motion capture as surface shapes. In Proceedings of the IEEE/CVF International Conference on Computer Vision. 5442-5451.
[27] Microsoft Corporation. [n. d.]. Microsoft Kinect.
[28] Vimal Mollyn, Riku Arakawa, Mayank Goel, Chris Harrison, and Karan Ahuja. 2023. IMUPoser: Full-Body Pose Estimation using IMUs in Phones, Watches, and Earbuds. In Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems. 1-12.
[29] NaturalPoint, Inc. [n. d.]. OptiTrack. https://www.optitrack.com.
[30] Shu Nishiguchi, Minoru Yamada, Koutatsu Nagai, Shuhei Mori, Yuu Kajiwara, Takuya Sonoda, Kazuya Yoshimura, Hiroyuki Yoshitomi, Hiromu Ito, Kazuya Okamoto, et al. 2012. Reliability and validity of gait analysis by android-based smartphone. Telemedicine and e-Health 18, 4 (2012), 292-296.
[31] Northern Digital Inc. 2020. trakSTAR. https://www.ndigital.com/msci/products/drivebay-trakstar.
[32] Mathias Parger, Joerg H Mueller, Dieter Schmalstieg, and Markus Steinberger. 2018. Human upper-body inverse kinematics for increased embodiment in consumer-grade virtual reality. In Proceedings of the 24th ACM Symposium on Virtual Reality Software and Technology. 1-10.
[33] Polhemus. 2020. Polhemus Motion Capture System. https://polhemus.com/.
[34] PolyCam. [n. d.]. PolyCam. https://poly.cam/.
[35] Jose Luis Ponton, Haoran Yun, Andreas Aristidou, Carlos Andujar, and Nuria Pelechano. 2023. SparsePoser: Real-time Full-body Motion Reconstruction from Sparse Data. ACM Transactions on Graphics 43, 1 (2023), 1-14.
[36] Jathushan Rajasegaran, Georgios Pavlakos, Angjoo Kanazawa, and Jitendra Malik. 2021. Tracking people with 3D representations. arXiv preprint arXiv:2111.07868 (2021).
[37] Nirupam Roy, He Wang, and Romit Roy Choudhury. 2014. I am a smartphone and I can tell my user's walking direction. In Proceedings of the 12th Annual International Conference on Mobile Systems, Applications, and Services. 329-342.
[38] Takaki Shiratori, Hyun Soo Park, Leonid Sigal, Yaser Sheikh, and Jessica K Hodgins. 2011. Motion capture from body-mounted cameras. In ACM SIGGRAPH 2011 Papers. 1-10.
[39] Ivan E Sutherland. 1968. A head-mounted three dimensional display. In Proceedings of the December 9-11, 1968, Fall Joint Computer Conference, Part I. 757-764.
[40] Matthew Trumble, Andrew Gilbert, Charles Malleson, Adrian Hilton, and John Collomosse. 2017. Total Capture: 3D human pose estimation fusing video and inertial sensors. In Proceedings of the 28th British Machine Vision Conference. 1-13.
[41] Vicon Motion Systems Ltd. [n. d.]. Vicon. https://www.vicon.com.
[42] Daniel Vlasic, Rolf Adelsberger, Giovanni Vannucci, John Barnwell, Markus Gross, Wojciech Matusik, and Jovan Popovic. 2007. Practical motion capture in everyday surroundings. ACM Transactions on Graphics (TOG) 26, 3 (2007), 35-es.
[43] Timo Von Marcard, Bodo Rosenhahn, Michael J Black, and Gerard Pons-Moll. 2017. Sparse Inertial Poser: Automatic 3D human pose estimation from sparse IMUs. In Computer Graphics Forum, Vol. 36. Wiley Online Library, 349-360.
[44] Erwin Wu, Ye Yuan, Hui-Shyong Yeo, Aaron Quigley, Hideki Koike, and Kris M Kitani. 2020. Back-Hand-Pose: 3D hand pose estimation for a wrist-worn camera via dorsum deformation network. In Proceedings of the 33rd Annual ACM Symposium on User Interface Software and Technology. 1147-1160.
[45] Xsens Technologies B.V. [n. d.]. Xsens IMU Systems. https://www.xsens.com. Accessed: 2024-03-07.
[46] Hang Yan, Qi Shan, and Yasutaka Furukawa. 2018. RIDI: Robust IMU double integration. In Proceedings of the European Conference on Computer Vision (ECCV). 621-636.
[47] Xinyu Yi, Yuxiao Zhou, Marc Habermann, Vladislav Golyanik, Shaohua Pan, Christian Theobalt, and Feng Xu. 2023. EgoLocate: Real-time Motion Capture, Localization, and Mapping with Sparse Body-mounted Sensors. arXiv preprint arXiv:2305.01599 (2023).
[48] Xinyu Yi, Yuxiao Zhou, Marc Habermann, Soshi Shimada, Vladislav Golyanik, Christian Theobalt, and Feng Xu. 2022. Physical Inertial Poser (PIP): Physics-aware real-time human motion tracking from sparse inertial sensors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 13167-13178.
[49] Xinyu Yi, Yuxiao Zhou, and Feng Xu. 2021. TransPose: Real-time 3D human translation and pose estimation with six inertial sensors. ACM Transactions on Graphics (TOG) 40, 4 (2021), 1-13.
[50] Yang Zhang, Chouchang Yang, Scott E Hudson, Chris Harrison, and Alanson Sample. 2018. Wall++: Room-scale interactive and context-aware sensing. In Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems. 1-15.
[51] Mingmin Zhao, Tianhong Li, Mohammad Abu Alsheikh, Yonglong Tian, Hang Zhao, Antonio Torralba, and Dina Katabi. 2018. Through-wall human pose estimation using radio signals. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 7356-7365.
[52] Li'an Zhuo, Jian Cao, Qi Wang, Bang Zhang, and Liefeng Bo. 2023. Towards Stable Human Pose Estimation via Cross-View Fusion and Foot Stabilization.
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 650-659. \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12492/images/012c9012e1a82bf3c97763379fc6abf4fc5f128e7f2716153832ccdd13538073.jpg b/data/2025/2504_12xxx/2504.12492/images/012c9012e1a82bf3c97763379fc6abf4fc5f128e7f2716153832ccdd13538073.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b47ab596bcf8e194fd20fc84587b44cfa8e53ec2 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/012c9012e1a82bf3c97763379fc6abf4fc5f128e7f2716153832ccdd13538073.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f139e6d3055538b49e9b663760d9b82235bd19d00feddec6be85324538e276c9 +size 4403 diff --git a/data/2025/2504_12xxx/2504.12492/images/0756f6e551704363b0a80d0018307791429a28468cb1415b01872fdd090bcd17.jpg b/data/2025/2504_12xxx/2504.12492/images/0756f6e551704363b0a80d0018307791429a28468cb1415b01872fdd090bcd17.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e8549955b53f3420f264304fb57e06e1e7840f0e --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/0756f6e551704363b0a80d0018307791429a28468cb1415b01872fdd090bcd17.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e57bca2cda352bb465125f378349411c3bb6bd67fcdeadfc1dcfcd0f890bd03 +size 20252 diff --git a/data/2025/2504_12xxx/2504.12492/images/1add247a60f95c26c9a84937cea737253382140a4f5f366dc8a57cffd9159d14.jpg b/data/2025/2504_12xxx/2504.12492/images/1add247a60f95c26c9a84937cea737253382140a4f5f366dc8a57cffd9159d14.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7bba5f43bfb49663b3e624cdf1c195a5cf668121 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/1add247a60f95c26c9a84937cea737253382140a4f5f366dc8a57cffd9159d14.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1f4146561293303d5df00fb0ec3d2dbb746565eccf8bcf2e997723b00bc96eb +size 3715 diff --git a/data/2025/2504_12xxx/2504.12492/images/1b2a7df0cabf3fc0e25cb50f472835af79ab269436848ca26ae321703af8357f.jpg b/data/2025/2504_12xxx/2504.12492/images/1b2a7df0cabf3fc0e25cb50f472835af79ab269436848ca26ae321703af8357f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..45bf9a0b5e70b2e7a9886d19dbd9c668f7093722 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/1b2a7df0cabf3fc0e25cb50f472835af79ab269436848ca26ae321703af8357f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c00308771117074bd4232174366e2110e83acd093fa3d91378e788e160bf3fe1 +size 17376 diff --git a/data/2025/2504_12xxx/2504.12492/images/21cb0fed5824f2b52eaead0f3b91c03397de145b60f7fc3fc56c8e8b6dc9ec69.jpg b/data/2025/2504_12xxx/2504.12492/images/21cb0fed5824f2b52eaead0f3b91c03397de145b60f7fc3fc56c8e8b6dc9ec69.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4d4ecdffa87ff3b79b85f840dd68af8a22e51760 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/21cb0fed5824f2b52eaead0f3b91c03397de145b60f7fc3fc56c8e8b6dc9ec69.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b11f0879c2deafc5b30b03511fae38e2b658f85dd6a31604b288dc040334c078 +size 41579 diff --git a/data/2025/2504_12xxx/2504.12492/images/245ac487888e82f4f86eed6cb209cc3d96f68466dd6718a8c94d350761353cc9.jpg b/data/2025/2504_12xxx/2504.12492/images/245ac487888e82f4f86eed6cb209cc3d96f68466dd6718a8c94d350761353cc9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a7e31c395e51b02b48586ac302f338216b099329 
--- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/245ac487888e82f4f86eed6cb209cc3d96f68466dd6718a8c94d350761353cc9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2f28f8af8d7f89980e91494e081c704d352f15cde20cd13ee8fce35adfbd95a1 +size 17555 diff --git a/data/2025/2504_12xxx/2504.12492/images/24a6ce53daf981e05ba77dd84c8966fdf8230686765f953522e471c00433664e.jpg b/data/2025/2504_12xxx/2504.12492/images/24a6ce53daf981e05ba77dd84c8966fdf8230686765f953522e471c00433664e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a22225970ce6d47effc30aef03bb39b7826ff968 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/24a6ce53daf981e05ba77dd84c8966fdf8230686765f953522e471c00433664e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1bd888965303865cd97f87557cb716c7ce2a03ac4e5ad287f8a841d3183a6581 +size 115172 diff --git a/data/2025/2504_12xxx/2504.12492/images/2af18b4f56ea893a7d4db8d091c0a22c68ebfee9c664430ea05299bdf5efa3c4.jpg b/data/2025/2504_12xxx/2504.12492/images/2af18b4f56ea893a7d4db8d091c0a22c68ebfee9c664430ea05299bdf5efa3c4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f832c583721b51f4fac1c1f7cc31163fab01a3c4 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/2af18b4f56ea893a7d4db8d091c0a22c68ebfee9c664430ea05299bdf5efa3c4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:555881b9b7a10a415d39588bdf30ec07986656ec8081dc2eee2fa8520a8873ed +size 18768 diff --git a/data/2025/2504_12xxx/2504.12492/images/3169657663aa0b2619685cc0f22df97a38dbc5e3520318143006da927e02c2d9.jpg b/data/2025/2504_12xxx/2504.12492/images/3169657663aa0b2619685cc0f22df97a38dbc5e3520318143006da927e02c2d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3e5061ecc9028e0f1e4e179c1a7b0c16f57375d4 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/3169657663aa0b2619685cc0f22df97a38dbc5e3520318143006da927e02c2d9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:299737e71b767456047ed45bfef78677522d5d7d4159599bbde4c2a064715ba2 +size 14007 diff --git a/data/2025/2504_12xxx/2504.12492/images/36e1570002c8570b361bdc0aa62d67edbc98118850cfb1dbe5dae9fc5bd7f18b.jpg b/data/2025/2504_12xxx/2504.12492/images/36e1570002c8570b361bdc0aa62d67edbc98118850cfb1dbe5dae9fc5bd7f18b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bf5db1a8f24f1a09439b5f8ed715b6feb9b005d8 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/36e1570002c8570b361bdc0aa62d67edbc98118850cfb1dbe5dae9fc5bd7f18b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cc6d93bfdbfac84e4c66edc45ff99a59ceb8d90b24de121a09acb9b254be2c24 +size 28372 diff --git a/data/2025/2504_12xxx/2504.12492/images/3c2eb974ddb24edaa9597fd4b66033fe86777991e12341a6a7db51af8002f3c2.jpg b/data/2025/2504_12xxx/2504.12492/images/3c2eb974ddb24edaa9597fd4b66033fe86777991e12341a6a7db51af8002f3c2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c3bbb9800002160a736d9804b2a461990ead66e4 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/3c2eb974ddb24edaa9597fd4b66033fe86777991e12341a6a7db51af8002f3c2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:860102c7c960b14850f9e96f71633d20afe27cdb6189a9892ed90a539beec69f +size 14480 diff --git a/data/2025/2504_12xxx/2504.12492/images/41dd9e935a6e838257beb11b887e0433dc3580897df48f9c7643911446cbaaf3.jpg 
b/data/2025/2504_12xxx/2504.12492/images/41dd9e935a6e838257beb11b887e0433dc3580897df48f9c7643911446cbaaf3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..991c756f84ed42451598213d0eeda41df740423e --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/41dd9e935a6e838257beb11b887e0433dc3580897df48f9c7643911446cbaaf3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c5316bddf8d6f4529622e72ab8cba457fa03a9dd843e8f76e0d4490691702a4 +size 46794 diff --git a/data/2025/2504_12xxx/2504.12492/images/468b8d082bb0ee2c8c8e2bf1858a5b7eddc0b0a61d02d612a1e9d13acd5a000c.jpg b/data/2025/2504_12xxx/2504.12492/images/468b8d082bb0ee2c8c8e2bf1858a5b7eddc0b0a61d02d612a1e9d13acd5a000c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f3671a45f1fb574028d7cf4d04f293d2bb1ca059 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/468b8d082bb0ee2c8c8e2bf1858a5b7eddc0b0a61d02d612a1e9d13acd5a000c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14ec72c934bf0c8defccbb0d06d60d57b249e502f78efa66f44ae862bf1ca1b5 +size 19848 diff --git a/data/2025/2504_12xxx/2504.12492/images/696e3fe3b988e348657a5ae71dc830e60ea0483d72b1488924a444e669644195.jpg b/data/2025/2504_12xxx/2504.12492/images/696e3fe3b988e348657a5ae71dc830e60ea0483d72b1488924a444e669644195.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5d3046fe1d74c5d764aee59c9876ae2ec83fb7b2 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/696e3fe3b988e348657a5ae71dc830e60ea0483d72b1488924a444e669644195.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:799d17d5d508987d38f4bd33d692e88cc826c9b6b8a8439388530752acca741a +size 30770 diff --git a/data/2025/2504_12xxx/2504.12492/images/7b97abca80b72e72c22ffcf61d1df2388fd2a3e720e8c40f16839719378dbc45.jpg b/data/2025/2504_12xxx/2504.12492/images/7b97abca80b72e72c22ffcf61d1df2388fd2a3e720e8c40f16839719378dbc45.jpg new file mode 100644 index 0000000000000000000000000000000000000000..63fde6c1a403eda8193834b81cf5b908d3be02bb --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/7b97abca80b72e72c22ffcf61d1df2388fd2a3e720e8c40f16839719378dbc45.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8729f2d754762caed34c27340a26a120e1fc5f964fa3f3f1afd005cddbe92557 +size 20197 diff --git a/data/2025/2504_12xxx/2504.12492/images/82d7e3785f82d268bc65de4155687f47ebd6e5f7505f1700e0b66ace8a9d59c2.jpg b/data/2025/2504_12xxx/2504.12492/images/82d7e3785f82d268bc65de4155687f47ebd6e5f7505f1700e0b66ace8a9d59c2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0ab8d38c0c6ebf97db40e9232a9a61611a1aeefa --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/82d7e3785f82d268bc65de4155687f47ebd6e5f7505f1700e0b66ace8a9d59c2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e88da11ad9a57aa19d7fbb9b1f7e30c94951d942942293d67a83d6d98f1c1e5 +size 71393 diff --git a/data/2025/2504_12xxx/2504.12492/images/85a597f5f89be09951d147dd3ce0870e8b95c3e0346b07bfdcafb9beef99c8b2.jpg b/data/2025/2504_12xxx/2504.12492/images/85a597f5f89be09951d147dd3ce0870e8b95c3e0346b07bfdcafb9beef99c8b2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3fce0d88756a2a23f94ac85e979cd1becd87cd99 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/85a597f5f89be09951d147dd3ce0870e8b95c3e0346b07bfdcafb9beef99c8b2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:c5420054feb4010ae3eefd12d3f7ddd5976482849d361d5c4360d8f0a3a32445 +size 3266 diff --git a/data/2025/2504_12xxx/2504.12492/images/8bf1542bdaa768fc5e2a80f6613166a3fb2954882e1c0b9e4d765ccec6f2611a.jpg b/data/2025/2504_12xxx/2504.12492/images/8bf1542bdaa768fc5e2a80f6613166a3fb2954882e1c0b9e4d765ccec6f2611a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..04df12f7b2530df2ea5b0eac3b00a0f85bd9598d --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/8bf1542bdaa768fc5e2a80f6613166a3fb2954882e1c0b9e4d765ccec6f2611a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a7d0d33309774fb03fc2f70b2068770a51059b49bd92a24619603bb760ed2fa +size 19783 diff --git a/data/2025/2504_12xxx/2504.12492/images/99ed87bf7a0cfd7d8516898be6c0c8c9a3be2623360a9c68576d7a576fcbccca.jpg b/data/2025/2504_12xxx/2504.12492/images/99ed87bf7a0cfd7d8516898be6c0c8c9a3be2623360a9c68576d7a576fcbccca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..caf28d61e37a8aed05312d8f66cc67080aa9f449 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/99ed87bf7a0cfd7d8516898be6c0c8c9a3be2623360a9c68576d7a576fcbccca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fbe2d0e9bb4cbf349cd4dda765717c52879254016e0c11633b00208719dc2a27 +size 16723 diff --git a/data/2025/2504_12xxx/2504.12492/images/9a40e3573eaab617538725d13e6c909e5c242d39f4c53f843dea1f2131d3bbad.jpg b/data/2025/2504_12xxx/2504.12492/images/9a40e3573eaab617538725d13e6c909e5c242d39f4c53f843dea1f2131d3bbad.jpg new file mode 100644 index 0000000000000000000000000000000000000000..72dff0ab93ec23f83d679ff99e7efa0ae3c07cd0 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/9a40e3573eaab617538725d13e6c909e5c242d39f4c53f843dea1f2131d3bbad.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42aca41d675231cea825076efaf48686fb68c5500e49ded6e3f65e150a857707 +size 25619 diff --git a/data/2025/2504_12xxx/2504.12492/images/9c83b064207a227a0c4f07b1711e26559babc7e4f26de546ec70e40642ad027d.jpg b/data/2025/2504_12xxx/2504.12492/images/9c83b064207a227a0c4f07b1711e26559babc7e4f26de546ec70e40642ad027d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f293a4074f8504f051c41fc1a2a999d80fe6c3a5 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/9c83b064207a227a0c4f07b1711e26559babc7e4f26de546ec70e40642ad027d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51bc08b64d71c39fc3adfd230839451912b485c6043cf001945113c571fcc141 +size 3857 diff --git a/data/2025/2504_12xxx/2504.12492/images/9ed4af4fa541a6111eb81d829e2a5dee85f3efe408f46523f63cfff3248702bf.jpg b/data/2025/2504_12xxx/2504.12492/images/9ed4af4fa541a6111eb81d829e2a5dee85f3efe408f46523f63cfff3248702bf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c7097aff51f04e1ef8eaac48592bbf162f390c6 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/9ed4af4fa541a6111eb81d829e2a5dee85f3efe408f46523f63cfff3248702bf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:37d80cde68989427c61bad7b8556babc748d434e988db567e4b23389bbe8c341 +size 12088 diff --git a/data/2025/2504_12xxx/2504.12492/images/a5fdf6041545d3658902b169cb999bb2423047dfed789e142ee272c001e87cc9.jpg b/data/2025/2504_12xxx/2504.12492/images/a5fdf6041545d3658902b169cb999bb2423047dfed789e142ee272c001e87cc9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..777fcb2ec117b379231a5afd445049544b2015ee --- /dev/null +++ 
b/data/2025/2504_12xxx/2504.12492/images/a5fdf6041545d3658902b169cb999bb2423047dfed789e142ee272c001e87cc9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:549b4677f28837f3c9bf09dc7190ed2a2ca7455b69acc0dadf922695bbc94c7b +size 22356 diff --git a/data/2025/2504_12xxx/2504.12492/images/b10bd07999b484b01ea1a78ab581c9df06c52de8c54920b77d3806f92132d486.jpg b/data/2025/2504_12xxx/2504.12492/images/b10bd07999b484b01ea1a78ab581c9df06c52de8c54920b77d3806f92132d486.jpg new file mode 100644 index 0000000000000000000000000000000000000000..124c06dcf0889f56cbfef3a1591867d325f4de51 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/b10bd07999b484b01ea1a78ab581c9df06c52de8c54920b77d3806f92132d486.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c1a807b4c35cea26e4832cfe72b954a79a1920db9d0bda4ba97f68b3d7b522f +size 3192 diff --git a/data/2025/2504_12xxx/2504.12492/images/b3048a04b939af7f80f294f9a87a4c591d20d22ace67ce0015d8db97f8dd61f1.jpg b/data/2025/2504_12xxx/2504.12492/images/b3048a04b939af7f80f294f9a87a4c591d20d22ace67ce0015d8db97f8dd61f1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7a87b9a29d5235f0c2f1f62fd3f6f591c630b4c0 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/b3048a04b939af7f80f294f9a87a4c591d20d22ace67ce0015d8db97f8dd61f1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cfe69894c47d93a1f5bd2aa862bd39d87a631e2093c4fc9f88bf86026acc4a1a +size 42991 diff --git a/data/2025/2504_12xxx/2504.12492/images/b463e6ba1e085e714896d2c0eb817f52b2064dd728a766208e5c2a436692c40d.jpg b/data/2025/2504_12xxx/2504.12492/images/b463e6ba1e085e714896d2c0eb817f52b2064dd728a766208e5c2a436692c40d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ceba1770f0fc5adccc085a3b2c8d6eed2c2d6abb --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/b463e6ba1e085e714896d2c0eb817f52b2064dd728a766208e5c2a436692c40d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c998cc14d168fa793aac143c0d904f6499c7fc877f1488510e6eefea4b5dd427 +size 11603 diff --git a/data/2025/2504_12xxx/2504.12492/images/d8de858657724951663c9cad16d40d80050174bb0830339947de87e3c6a36abc.jpg b/data/2025/2504_12xxx/2504.12492/images/d8de858657724951663c9cad16d40d80050174bb0830339947de87e3c6a36abc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..84a8a58732e7ce83c831d0ee451b70c3640f310d --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/d8de858657724951663c9cad16d40d80050174bb0830339947de87e3c6a36abc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c2dd7cf72e60b73b7faad5379d97971a7c5ff6e2728335b520115dab07b9aa1 +size 60152 diff --git a/data/2025/2504_12xxx/2504.12492/images/ec6ffac28fee5afd7196de7805b6b0dfc2f59e8909d73362d94f468974edd810.jpg b/data/2025/2504_12xxx/2504.12492/images/ec6ffac28fee5afd7196de7805b6b0dfc2f59e8909d73362d94f468974edd810.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8a47a5ee3c210425278d557a32ee2b857a44c875 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/ec6ffac28fee5afd7196de7805b6b0dfc2f59e8909d73362d94f468974edd810.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:819940e819754635f0f7c1a19a9059a854f9630bd1b1f0d2abd5d4c6f12182ed +size 3578 diff --git a/data/2025/2504_12xxx/2504.12492/images/ede03b447a6f857c0266e6eff4f01b390fd362031a467d8cdaa8a83ca3d76bf3.jpg 
b/data/2025/2504_12xxx/2504.12492/images/ede03b447a6f857c0266e6eff4f01b390fd362031a467d8cdaa8a83ca3d76bf3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5cd3191d5f0d32c7ecea12db8a8a413ba9249f30 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/ede03b447a6f857c0266e6eff4f01b390fd362031a467d8cdaa8a83ca3d76bf3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cbf0d71f18dcf446d684ea5b83f48c9cfa8cc69c1e5e3d711a075644724fdcfc +size 26563 diff --git a/data/2025/2504_12xxx/2504.12492/images/f1aa1fb87ee39eb83a3a850d85c7930c38f8b9d2834c06689b08d09eb640adb7.jpg b/data/2025/2504_12xxx/2504.12492/images/f1aa1fb87ee39eb83a3a850d85c7930c38f8b9d2834c06689b08d09eb640adb7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3177a070e601032d5f08f27bcb4327fa86c81620 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/images/f1aa1fb87ee39eb83a3a850d85c7930c38f8b9d2834c06689b08d09eb640adb7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99d4306006384cc701d454cd671e7a77f0f9d5a684fdcef7a03dae425dc72043 +size 21701 diff --git a/data/2025/2504_12xxx/2504.12492/layout.json b/data/2025/2504_12xxx/2504.12492/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..7b140955b57d4656e9e013f4285e7ff4e84bc98f --- /dev/null +++ b/data/2025/2504_12xxx/2504.12492/layout.json @@ -0,0 +1,11398 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 69, + 80, + 541, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 80, + 541, + 118 + ], + "spans": [ + { + "bbox": [ + 69, + 80, + 541, + 118 + ], + "type": "text", + "content": "MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 158, + 125, + 206, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 158, + 125, + 206, + 137 + ], + "spans": [ + { + "bbox": [ + 158, + 125, + 206, + 137 + ], + "type": "text", + "content": "Vasco Xu" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 137, + 138, + 228, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 138, + 228, + 149 + ], + "spans": [ + { + "bbox": [ + 137, + 138, + 228, + 149 + ], + "type": "text", + "content": "University of Chicago" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 154, + 150, + 211, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 150, + 211, + 162 + ], + "spans": [ + { + "bbox": [ + 154, + 150, + 211, + 162 + ], + "type": "text", + "content": "Chicago, USA" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 133, + 163, + 231, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 163, + 231, + 174 + ], + "spans": [ + { + "bbox": [ + 133, + 163, + 231, + 174 + ], + "type": "text", + "content": "vascoxu@uchicago.edu" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 139, + 183, + 225, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 183, + 225, + 196 + ], + "spans": [ + { + "bbox": [ + 139, + 183, + 225, + 196 + ], + "type": "text", + "content": "Henry Hoffmann" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 137, + 197, + 227, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 197, + 227, + 208 + ], + "spans": [ + { + "bbox": [ + 137, + 197, + 227, + 208 + ], + "type": "text", + "content": "University of Chicago" + } + ] + } + ], + "index": 7 + }, + 
{ + "bbox": [ + 153, + 209, + 211, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 209, + 211, + 220 + ], + "spans": [ + { + "bbox": [ + 153, + 209, + 211, + 220 + ], + "type": "text", + "content": "Chicago, USA" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 115, + 220, + 249, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 220, + 249, + 232 + ], + "spans": [ + { + "bbox": [ + 115, + 220, + 249, + 232 + ], + "type": "text", + "content": "hankhoffmann@cs.uchicago.edu" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 392, + 125, + 465, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 392, + 125, + 465, + 137 + ], + "spans": [ + { + "bbox": [ + 392, + 125, + 465, + 137 + ], + "type": "text", + "content": "Chenfeng Gao" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 377, + 138, + 481, + 149 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 377, + 138, + 481, + 149 + ], + "spans": [ + { + "bbox": [ + 377, + 138, + 481, + 149 + ], + "type": "text", + "content": "Northwestern University" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 397, + 150, + 460, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 397, + 150, + 460, + 160 + ], + "spans": [ + { + "bbox": [ + 397, + 150, + 460, + 160 + ], + "type": "text", + "content": "Evanston, USA" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 348, + 162, + 509, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 348, + 162, + 509, + 174 + ], + "spans": [ + { + "bbox": [ + 348, + 162, + 509, + 174 + ], + "type": "text", + "content": "chenfenggao2029@u.northwestern.edu" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 397, + 183, + 460, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 397, + 183, + 460, + 196 + ], + "spans": [ + { + "bbox": [ + 397, + 183, + 460, + 196 + ], + "type": "text", + "content": "Karan Ahuja" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 376, + 196, + 480, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 376, + 196, + 480, + 208 + ], + "spans": [ + { + "bbox": [ + 376, + 196, + 480, + 208 + ], + "type": "text", + "content": "Northwestern University" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 397, + 209, + 460, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 397, + 209, + 460, + 219 + ], + "spans": [ + { + "bbox": [ + 397, + 209, + 460, + 219 + ], + "type": "text", + "content": "Evanston, USA" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 373, + 220, + 484, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 373, + 220, + 484, + 232 + ], + "spans": [ + { + "bbox": [ + 373, + 220, + 484, + 232 + ], + "type": "text", + "content": "kahuja@northwestern.edu" + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 52, + 245, + 304, + 413 + ], + "blocks": [ + { + "bbox": [ + 52, + 245, + 304, + 413 + ], + "lines": [ + { + "bbox": [ + 52, + 245, + 304, + 413 + ], + "spans": [ + { + "bbox": [ + 52, + 245, + 304, + 413 + ], + "type": "image", + "image_path": "21cb0fed5824f2b52eaead0f3b91c03397de145b60f7fc3fc56c8e8b6dc9ec69.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 423, + 559, + 447 + ], + "lines": [ + { + "bbox": [ + 50, + 423, + 559, + 447 + ], + "spans": [ + { + "bbox": [ + 50, + 423, + 559, + 447 + ], + "type": "text", + "content": "Figure 1: 
MobilePoser uses any subset of consumer mobile devices (phones, watches, earbuds) available to estimate full-body pose and global translation." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 304, + 245, + 515, + 413 + ], + "blocks": [ + { + "bbox": [ + 304, + 245, + 515, + 413 + ], + "lines": [ + { + "bbox": [ + 304, + 245, + 515, + 413 + ], + "spans": [ + { + "bbox": [ + 304, + 245, + 515, + 413 + ], + "type": "image", + "image_path": "3c2eb974ddb24edaa9597fd4b66033fe86777991e12341a6a7db51af8002f3c2.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "bbox": [ + 51, + 450, + 113, + 461 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 450, + 113, + 461 + ], + "spans": [ + { + "bbox": [ + 51, + 450, + 113, + 461 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 50, + 465, + 295, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 465, + 295, + 620 + ], + "spans": [ + { + "bbox": [ + 50, + 465, + 295, + 620 + ], + "type": "text", + "content": "There has been a continued trend towards minimizing instrumentation for full-body motion capture, going from specialized rooms and equipment, to arrays of worn sensors and recently sparse inertial pose capture methods. However, as these techniques migrate towards lower-fidelity IMUs on ubiquitous commodity devices, like phones, watches, and earbuds, challenges arise including compromised online performance, temporal consistency, and loss of global translation due to sensor noise and drift. Addressing these challenges, we introduce MobilePoser, a real-time system for full-body pose and global translation estimation using any available subset of IMUs already present in these consumer devices. MobilePoser employs a multi-stage deep neural network for kinematic pose estimation followed by a physics-based motion optimizer, achieving state-of-the-art accuracy while remaining lightweight. We conclude" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 314, + 452, + 559, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 452, + 559, + 486 + ], + "spans": [ + { + "bbox": [ + 314, + 452, + 559, + 486 + ], + "type": "text", + "content": "with a series of demonstrative applications to illustrate the unique potential of MobilePoser across a variety of fields, such as health and wellness, gaming, and indoor navigation to name a few." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 315, + 495, + 400, + 506 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 495, + 400, + 506 + ], + "spans": [ + { + "bbox": [ + 315, + 495, + 400, + 506 + ], + "type": "text", + "content": "CCS CONCEPTS" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 314, + 509, + 560, + 532 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 509, + 560, + 532 + ], + "spans": [ + { + "bbox": [ + 314, + 509, + 560, + 532 + ], + "type": "text", + "content": "- Human-centered computing " + }, + { + "bbox": [ + 314, + 509, + 560, + 532 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 314, + 509, + 560, + 532 + ], + "type": "text", + "content": " Ubiquitous and mobile computing." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 315, + 540, + 380, + 552 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 540, + 380, + 552 + ], + "spans": [ + { + "bbox": [ + 315, + 540, + 380, + 552 + ], + "type": "text", + "content": "KEYWORDS" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 314, + 555, + 559, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 555, + 559, + 567 + ], + "spans": [ + { + "bbox": [ + 314, + 555, + 559, + 567 + ], + "type": "text", + "content": "Motion capture, sensors, inertial measurement units, mobile devices" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 315, + 571, + 405, + 580 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 571, + 405, + 580 + ], + "spans": [ + { + "bbox": [ + 315, + 571, + 405, + 580 + ], + "type": "text", + "content": "ACM Reference Format:" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 314, + 581, + 560, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 581, + 560, + 640 + ], + "spans": [ + { + "bbox": [ + 314, + 581, + 560, + 640 + ], + "type": "text", + "content": "Vasco Xu, Chenfeng Gao, Henry Hoffmann, and Karan Ahuja. 2024. MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices. In The 37th Annual ACM Symposium on User Interface Software and Technology (UIST '24), October 13–16, 2024, Pittsburgh, PA, USA. ACM, New York, NY, USA, 11 pages. https://doi.org/10.1145/3654777.3676461" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 315, + 651, + 421, + 662 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 651, + 421, + 662 + ], + "spans": [ + { + "bbox": [ + 315, + 651, + 421, + 662 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 313, + 666, + 560, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 666, + 560, + 711 + ], + "spans": [ + { + "bbox": [ + 313, + 666, + 560, + 711 + ], + "type": "text", + "content": "Full-body motion capture has numerous applications in gaming, fitness, and virtual and augmented reality (VR/AR), enabling immersive experiences and context-aware interactions. While vision-based approaches for 3D human pose estimation have shown great" + } + ] + } + ], + "index": 31 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 209, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 209, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 209, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.12492v1 [cs.HC] 16 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 634, + 295, + 675 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 634, + 295, + 675 + ], + "spans": [ + { + "bbox": [ + 50, + 634, + 295, + 675 + ], + "type": "text", + "content": "Permission to make digital or hard copies of part or all of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for third-party components of this work must be honored. For all other uses, contact the owner/author(s)." 
+ } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 52, + 676, + 194, + 684 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 676, + 194, + 684 + ], + "spans": [ + { + "bbox": [ + 52, + 676, + 194, + 684 + ], + "type": "text", + "content": "UIST '24, October 13-16, 2024, Pittsburgh, PA, USA" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 52, + 685, + 186, + 693 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 685, + 186, + 693 + ], + "spans": [ + { + "bbox": [ + 52, + 685, + 186, + 693 + ], + "type": "text", + "content": "© 2024 Copyright held by the owner/author(s)." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 52, + 693, + 155, + 700 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 693, + 155, + 700 + ], + "spans": [ + { + "bbox": [ + 52, + 693, + 155, + 700 + ], + "type": "text", + "content": "ACM ISBN 979-8-4007-0628-8/24/10" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 52, + 700, + 167, + 709 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 700, + 167, + 709 + ], + "spans": [ + { + "bbox": [ + 52, + 700, + 167, + 709 + ], + "type": "text", + "content": "https://doi.org/10.1145/3654777.3676461" + } + ] + } + ], + "index": 36 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 82, + 207, + 286 + ], + "blocks": [ + { + "bbox": [ + 52, + 82, + 207, + 286 + ], + "lines": [ + { + "bbox": [ + 52, + 82, + 207, + 286 + ], + "spans": [ + { + "bbox": [ + 52, + 82, + 207, + 286 + ], + "type": "image", + "image_path": "696e3fe3b988e348657a5ae71dc830e60ea0483d72b1488924a444e669644195.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 297, + 560, + 331 + ], + "lines": [ + { + "bbox": [ + 50, + 297, + 560, + 331 + ], + "spans": [ + { + "bbox": [ + 50, + 297, + 560, + 331 + ], + "type": "text", + "content": "Figure 2: Real-time global pose estimation powered by MobilePoser: (A) Person with smartwatch (left wrist) waving their hands. (B) Person with smartwatch (left wrist) performing jumping jacks. (C) Person wearing a smartwatch (left wrist) and carrying a phone in their right pocket running." 
+ } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 209, + 82, + 306, + 286 + ], + "blocks": [ + { + "bbox": [ + 209, + 82, + 306, + 286 + ], + "lines": [ + { + "bbox": [ + 209, + 82, + 306, + 286 + ], + "spans": [ + { + "bbox": [ + 209, + 82, + 306, + 286 + ], + "type": "image", + "image_path": "0756f6e551704363b0a80d0018307791429a28468cb1415b01872fdd090bcd17.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 306, + 82, + 402, + 285 + ], + "blocks": [ + { + "bbox": [ + 306, + 82, + 402, + 285 + ], + "lines": [ + { + "bbox": [ + 306, + 82, + 402, + 285 + ], + "spans": [ + { + "bbox": [ + 306, + 82, + 402, + 285 + ], + "type": "image", + "image_path": "245ac487888e82f4f86eed6cb209cc3d96f68466dd6718a8c94d350761353cc9.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 403, + 82, + 482, + 285 + ], + "blocks": [ + { + "bbox": [ + 403, + 82, + 482, + 285 + ], + "lines": [ + { + "bbox": [ + 403, + 82, + 482, + 285 + ], + "spans": [ + { + "bbox": [ + 403, + 82, + 482, + 285 + ], + "type": "image", + "image_path": "1b2a7df0cabf3fc0e25cb50f472835af79ab269436848ca26ae321703af8357f.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 482, + 82, + 558, + 285 + ], + "blocks": [ + { + "bbox": [ + 482, + 82, + 558, + 285 + ], + "lines": [ + { + "bbox": [ + 482, + 82, + 558, + 285 + ], + "spans": [ + { + "bbox": [ + 482, + 82, + 558, + 285 + ], + "type": "image", + "image_path": "3169657663aa0b2619685cc0f22df97a38dbc5e3520318143006da927e02c2d9.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 346, + 295, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 346, + 295, + 402 + ], + "spans": [ + { + "bbox": [ + 50, + 346, + 295, + 402 + ], + "type": "text", + "content": "promise, they require subjects to be within the camera's field of view, limiting their practicability for mobile and on-the-go applications. In contrast, inertial measurement unit (IMU) based techniques offer an attractive alternative, enabling less intrusive and occlusion-free user digitization [3]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 402, + 295, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 402, + 295, + 554 + ], + "spans": [ + { + "bbox": [ + 50, + 402, + 295, + 554 + ], + "type": "text", + "content": "Commercial systems such as Xsens [45] use up to 17 special-purpose sensors to provide highly accurate pose estimations. However, such approaches are intrusive, making them undesirable for everyday use. Consequently, there has been a trend towards minimizing instrumentation. Sparse inertial pose capture methods, such as TransPose [49] and DIP [14], use 6 IMUs to achieve a balance between accuracy and practicality. Yet, these methods still require expensive and special-purpose IMUs attached to specific body joints. To enable full-body motion tracking without any external infrastructure, IMUPoser [28] leverages IMUs in devices we already carry around with us, namely smartphones, smartwatches, and earbuds. 
These commodity devices, however, use lower-fidelity IMUs, which compromises online performance, temporal consistency, and global translation estimation." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 555, + 295, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 555, + 295, + 676 + ], + "spans": [ + { + "bbox": [ + 50, + 555, + 295, + 676 + ], + "type": "text", + "content": "In this work, we present MobilePoser, a real-time user digitization technique that tracks both poses and global movement (referred to as translation) using consumer devices (Figure 1) such as watches, phones and earbuds. To enable on-the-go motion tracking without any external infrastructure, we must address a set of unique challenges. First, the number of instrumented points is dynamically changing and sparse (at most three devices, with as few as one), making the problem highly under-constrained. Second, IMUs do not directly measure positional data, making global translation tracking non-trivial. Additionally, noise and drift from the low-cost IMUs found in commodity devices complicates pose and translation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 346, + 558, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 346, + 558, + 368 + ], + "spans": [ + { + "bbox": [ + 314, + 346, + 558, + 368 + ], + "type": "text", + "content": "estimation. Finally, such a system should operate directly on-device for real-time use, anywhere, anytime." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 369, + 559, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 369, + 559, + 577 + ], + "spans": [ + { + "bbox": [ + 313, + 369, + 559, + 577 + ], + "type": "text", + "content": "MobilePoser tackles these challenges by employing a multi-stage approach. For pose estimation, it utilizes a deep neural network (DNN) to predict full-body pose from the available IMU data, followed by a physics-based optimization step to ensure spatiotemporal consistency and plausible kinematics. This greatly helps resolve ambiguous instrumented joint motion profiles, such as differentiating between waving (Figure 2 A) versus jumping jacks (Figure 2 B) from only a single smartwatch on the wrist. To aid in generalizability, the model is trained on a large dataset of synthesized IMU measurements generated from high-quality motion capture (MoCap) data. For global translation estimation, MobilePoser employs a hybrid approach that fuses predictions from a foot contact-based method and a DNN-based method that directly regresses the root joint velocity. This combination enables accurate and robust translation estimation, even in challenging scenarios where both feet are in motion together (Figure 2 C). Importantly, MobilePoser is optimized to run on-device, achieving real-time performance of 60 frames per second on a smartphone (iPhone 15 Pro), making it suitable for mobile applications." 
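+
+ To make the multi-stage design above concrete, the following is a minimal, hypothetical sketch of how the stages could be wired together; the module names and call signatures are our placeholders, not the released implementation:
+
+ ```python
+ import torch.nn as nn
+
+ class MobilePoserSketch(nn.Module):
+     """Illustrative glue code for the stages described above; not the released API."""
+
+     def __init__(self, pose_net, contact_net, velocity_net, physics_optimizer):
+         super().__init__()
+         self.pose_net = pose_net                    # DNN: masked IMU window -> full-body pose
+         self.contact_net = contact_net              # DNN: foot-ground contact probabilities
+         self.velocity_net = velocity_net            # DNN: direct root-velocity regression
+         self.physics_optimizer = physics_optimizer  # physics-based refinement step
+
+     def forward(self, imu_window):
+         theta = self.pose_net(imu_window)
+         c_foot = self.contact_net(imu_window, theta)
+         v_e = self.velocity_net(imu_window, theta)
+         # The optimizer enforces plausible kinematics and fuses the two
+         # velocity cues into a global translation (Secs. 3.3-3.4).
+         return self.physics_optimizer(theta, c_foot, v_e)
+ ```
+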
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 324, + 578, + 559, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 578, + 559, + 588 + ], + "spans": [ + { + "bbox": [ + 324, + 578, + 559, + 588 + ], + "type": "text", + "content": "In summary, MobilePoser makes the following key contributions:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 326, + 600, + 560, + 709 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 326, + 600, + 560, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 600, + 560, + 632 + ], + "spans": [ + { + "bbox": [ + 326, + 600, + 560, + 632 + ], + "type": "text", + "content": "(1) It presents a novel framework for inertial translation estimation using consumer devices, enabling accurate tracking of global movement without specialized hardware." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 326, + 633, + 559, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 633, + 559, + 676 + ], + "spans": [ + { + "bbox": [ + 326, + 633, + 559, + 676 + ], + "type": "text", + "content": "(2) It achieves state-of-the-art full-body pose estimation across various on-body configurations of commodity IMU devices, demonstrating robust performance with as few as one and up to three wearable devices." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 326, + 677, + 559, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 677, + 559, + 709 + ], + "spans": [ + { + "bbox": [ + 326, + 677, + 559, + 709 + ], + "type": "text", + "content": "(3) It provides an open-source implementation that runs in real-time on edge devices, making it accessible and practical for widespread use." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 200, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 200, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 200, + 68 + ], + "type": "text", + "content": "UIST '24, October 13-16, 2024, Pittsburgh, PA, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 531, + 60, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 531, + 60, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 531, + 60, + 558, + 68 + ], + "type": "text", + "content": "Xu, et al." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 52, + 700, + 262, + 709 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 700, + 262, + 709 + ], + "spans": [ + { + "bbox": [ + 52, + 700, + 262, + 709 + ], + "type": "text", + "content": "1Note, we count the left and right earbuds as a unified single IMU stream" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 92, + 81, + 519, + 173 + ], + "blocks": [ + { + "bbox": [ + 92, + 81, + 519, + 173 + ], + "lines": [ + { + "bbox": [ + 92, + 81, + 519, + 173 + ], + "spans": [ + { + "bbox": [ + 92, + 81, + 519, + 173 + ], + "type": "table", + "html": "
<table><tr><td>System</td><td># Inst. Joints</td><td>FPS</td><td>Consumer Device</td><td>Translation</td><td>MPJVE (cm)</td><td>Jitter (10²m/s³)</td></tr>
<tr><td>Xsens [45]</td><td>17</td><td>120</td><td>×</td><td>✓</td><td>-</td><td>-</td></tr>
<tr><td>SIP [43]</td><td>6</td><td>60</td><td>×</td><td>×</td><td>7.7</td><td>3.8</td></tr>
<tr><td>DIP [14]</td><td>6</td><td>29</td><td>×</td><td>×</td><td>8.9</td><td>30.13</td></tr>
<tr><td>TransPose [49]</td><td>6</td><td>90</td><td>×</td><td>✓</td><td>7.1</td><td>1.4</td></tr>
<tr><td>PIP [48]</td><td>6</td><td>60</td><td>×</td><td>✓</td><td>5.9</td><td>0.24</td></tr>
<tr><td>IMUPoser [28]</td><td>1-3</td><td>25</td><td>✓</td><td>×</td><td>12.1</td><td>1.9</td></tr>
<tr><td>MobilePoser (our work)</td><td>1-3</td><td>60</td><td>✓</td><td>✓</td><td>10.6</td><td>0.97</td></tr></table>
", + "image_path": "b3048a04b939af7f80f294f9a87a4c591d20d22ace67ce0015d8db97f8dd61f1.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 169, + 173, + 440, + 183 + ], + "lines": [ + { + "bbox": [ + 169, + 173, + 440, + 183 + ], + "spans": [ + { + "bbox": [ + 169, + 173, + 440, + 183 + ], + "type": "text", + "content": "Table 1: Comparison with key prior work on the DIP-IMU dataset." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 50, + 211, + 159, + 223 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 211, + 159, + 223 + ], + "spans": [ + { + "bbox": [ + 50, + 211, + 159, + 223 + ], + "type": "text", + "content": "2 RELATED WORK" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 228, + 274, + 240 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 228, + 274, + 240 + ], + "spans": [ + { + "bbox": [ + 50, + 228, + 274, + 240 + ], + "type": "text", + "content": "2.1 User Digitization with External Sensors" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 241, + 295, + 363 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 241, + 295, + 363 + ], + "spans": [ + { + "bbox": [ + 50, + 241, + 295, + 363 + ], + "type": "text", + "content": "Commercial motion capture systems such as OptiTrack [29] and Vicon [41] use specialized hardware, such as multiple calibrated high-speed infrared cameras, to track retroreflective markers attached to a user's body. Such setups are commonly used in games, movies and character animations that require millimeter accuracy and are the gold standard of motion capture. The expensive infrastructure required by commercial systems, makes them impractical for everyday use. Therefore, much research has been devoted to instrumentation-free approaches using monocular cameras. Such approaches generally rely on RGB [9, 13, 36] or depth [27] cameras based computer vision techniques to predict body pose." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 363, + 295, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 363, + 295, + 462 + ], + "spans": [ + { + "bbox": [ + 50, + 363, + 295, + 462 + ], + "type": "text", + "content": "There also exists specialized external hardware for pose tracking in Extended Reality (XR). For example, the HTC Vive [2], PlayStation VR [1] and Oculus Rift [32] track the head, handheld controllers and other limb-borne accessories using external sensor base stations for Virtual Reality (VR) applications. The un-sensed joints are estimated with inverse kinematics [15] or learning-based methods [16, 35]. Other non-optical external approaches for pose estimation include capacitive sensing [50], magnetic fields [31, 33], RF [51], and mechanical linkages [39]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 484, + 267, + 509 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 484, + 267, + 509 + ], + "spans": [ + { + "bbox": [ + 50, + 484, + 267, + 509 + ], + "type": "text", + "content": "2.2 User Digitization with non-IMU Worn Sensors" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 512, + 295, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 512, + 295, + 589 + ], + "spans": [ + { + "bbox": [ + 50, + 512, + 295, + 589 + ], + "type": "text", + "content": "Wearable sensors provide a portable and flexible alternative to external sensors. 
For example, MI-Poser [7] uses magnetic tracking in wristbands and AR glasses to estimate upper-body poses. Other works have explored wrist-worn cameras [20, 44], EMG sensors [24], EIT sensors [22], wrist-worn antennas [19] and depth sensor armbands [10]. However, these works focus solely on capturing the motion of specific body parts (e.g., wrist or upper-body)."
+ }
+ ]
+ }
+ ],
+ "index": 9
+ },
+ {
+ "bbox": [
+ 50,
+ 590,
+ 295,
+ 710
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 50,
+ 590,
+ 295,
+ 710
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 50,
+ 590,
+ 295,
+ 710
+ ],
+ "type": "text",
+ "content": "To capture full-body motion, a popular approach is to use body-mounted cameras coupled with computer vision techniques [5, 38]. Other works have explored different sensor technologies such as ultrasonic sensors [42] and RFID [18]. Nevertheless, these works require users to wear sensors they do not already have. Pose-On-The-Go [4] addresses this by estimating full-body pose via extreme sensor fusion, leveraging a phone's front and rear cameras, thus requiring no special instrumentation. However, it is computationally expensive and relies heavily on heuristics to power body poses, often resulting in unnatural motions. MobilePoser differentiates itself by focusing on full-body pose estimation using power-efficient"
+ }
+ ]
+ }
+ ],
+ "index": 10
+ },
+ {
+ "bbox": [
+ 314,
+ 213,
+ 559,
+ 234
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 314,
+ 213,
+ 559,
+ 234
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 314,
+ 213,
+ 559,
+ 234
+ ],
+ "type": "text",
+ "content": "IMUs already found in consumer devices, such as smartphones, smartwatches, and earbuds."
+ }
+ ]
+ }
+ ],
+ "index": 11
+ },
+ {
+ "bbox": [
+ 314,
+ 243,
+ 547,
+ 256
+ ],
+ "type": "title",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 314,
+ 243,
+ 547,
+ 256
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 314,
+ 243,
+ 547,
+ 256
+ ],
+ "type": "text",
+ "content": "2.3 User Digitization with IMU Worn Sensors"
+ }
+ ]
+ }
+ ],
+ "index": 12
+ },
+ {
+ "bbox": [
+ 313,
+ 258,
+ 559,
+ 335
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 313,
+ 258,
+ 559,
+ 335
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 313,
+ 258,
+ 559,
+ 335
+ ],
+ "type": "text",
+ "content": "Commercial motion capture systems, such as Xsens [45], use a large number of inertial sensors (typically 17) strapped to the body to provide high-quality motion capture. These setups consist of homogeneous, high-grade IMUs that are calibrated for noise and have known positions on the body, resulting in a less ill-posed problem compared to using sparse, heterogeneous sensors. However, such an approach is highly inconvenient and intrusive for everyday use."
+ }
+ ]
+ }
+ ],
+ "index": 13
+ },
+ {
+ "bbox": [
+ 313,
+ 335,
+ 559,
+ 456
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 313,
+ 335,
+ 559,
+ 456
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 313,
+ 335,
+ 559,
+ 456
+ ],
+ "type": "text",
+ "content": "To address this limitation, researchers have explored reconstructing human motions from a reduced number of sensors. Works such as SIP [43], DIP [14], PIP [48], TIP [17], and TransPose [49] have demonstrated the feasibility of using only 6 commercial-grade Xsens IMU sensors for full-body motion capture. Works have further explored integrating other input modalities (e.g., UWB [8] and egocentric images [47]) in addition to the 6 IMUs for increased performance. 
All these approaches leverage the homogeneity and known calibrated positions of the sensors to achieve accurate pose estimation. However, even 6 sensors can be cumbersome for on-the-go applications, especially those that require passive sensing." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 456, + 559, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 456, + 559, + 555 + ], + "spans": [ + { + "bbox": [ + 313, + 456, + 559, + 555 + ], + "type": "text", + "content": "Recent research has investigated even sparser IMU configurations using commodity devices. IMUPoser [28], which is most closely related to our work, performs pose estimation using any combination of smartphone, smartwatch, and earbuds. While IMUPoser tackles the challenges of heterogeneous sensor quality for pose estimation, it lacks global translation due to IMU noise and drift, and contains unrealistic spatio-temporal motion artifacts. Additionally, IMUPoser runs on a laptop at " + }, + { + "bbox": [ + 313, + 456, + 559, + 555 + ], + "type": "inline_equation", + "content": "25\\mathrm{Hz}" + }, + { + "bbox": [ + 313, + 456, + 559, + 555 + ], + "type": "text", + "content": ", limiting its practicality for real-time mobile applications." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 554, + 559, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 554, + 559, + 632 + ], + "spans": [ + { + "bbox": [ + 313, + 554, + 559, + 632 + ], + "type": "text", + "content": "In contrast, MobilePoser addresses these limitations by demonstrating improved pose estimation accuracy on widely used benchmarks while also estimating global translation (see Table 1). Furthermore, our system is designed to run fully on-device, achieving real-time performance of 60 fps on edge mobile devices. This enables MobilePoser to provide a more practical and accessible solution for on-the-go motion capture using commodity devices." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 639, + 411, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 639, + 411, + 651 + ], + "spans": [ + { + "bbox": [ + 314, + 639, + 411, + 651 + ], + "type": "text", + "content": "3 MOBILEPOSER" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 654, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 654, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 654, + 559, + 710 + ], + "type": "text", + "content": "Estimating a user's full-body pose from a sparse set of IMU observations is a severely under-constrained problem as it aims to infer a high-dimensional quantity, i.e., the full-body pose, from low-dimensional observations that only capture partial motion at each instrumented point. 
Moreover, multiple possible solutions could" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 397, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 397, + 68 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 397, + 68 + ], + "type": "text", + "content": "MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 410, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 410, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 410, + 60, + 559, + 69 + ], + "type": "text", + "content": "UIST'24,October 13-16,2024,Pittsburgh,PA,USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 62, + 86, + 541, + 244 + ], + "blocks": [ + { + "bbox": [ + 62, + 86, + 541, + 244 + ], + "lines": [ + { + "bbox": [ + 62, + 86, + 541, + 244 + ], + "spans": [ + { + "bbox": [ + 62, + 86, + 541, + 244 + ], + "type": "image", + "image_path": "82d7e3785f82d268bc65de4155687f47ebd6e5f7505f1700e0b66ace8a9d59c2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 260, + 560, + 316 + ], + "lines": [ + { + "bbox": [ + 50, + 260, + 560, + 316 + ], + "spans": [ + { + "bbox": [ + 50, + 260, + 560, + 316 + ], + "type": "text", + "content": "Figure 3: MobilePoser system overview. MobilePoser accepts any available subset of IMU data from the user and masks absent devices by setting their values to zero. The IMU data is then fed into two main modules: (1) Pose Estimation, which first estimates joint positions followed by joint rotations, and (2) Translation Estimation, which combines foot-ground contact probabilities with a direct neural network-based approach to regress global velocity. Finally, a Physics Optimizer refines the predicted joint rotations and global translation to ensure they satisfy physical constraints." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 332, + 296, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 332, + 296, + 411 + ], + "spans": [ + { + "bbox": [ + 50, + 332, + 296, + 411 + ], + "type": "text", + "content": "explain the observed data, making it challenging to determine the correct pose. To tackle these challenges, we introduce MobilePoser, a system that leverages data-driven learning and physics-based optimization to estimate accurate and plausible full-body poses and global translations from sparse IMU inputs. Figure 3 provides an overview of our pipeline, which we describe in detail in the following sections." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 432, + 145, + 444 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 432, + 145, + 444 + ], + "spans": [ + { + "bbox": [ + 51, + 432, + 145, + 444 + ], + "type": "text", + "content": "3.1 System Input" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 446, + 295, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 446, + 295, + 555 + ], + "spans": [ + { + "bbox": [ + 50, + 446, + 295, + 555 + ], + "type": "text", + "content": "MobilePoser takes as input acceleration and orientation readings from IMUs across any subset of three consumer devices: smartphones, smartwatches, and earbuds. 
Each of these devices can be placed at different body locations, resulting in various possible combinations. For instance, a smartphone can be stored in the left or right pocket, held in the left or right hand, placed next to the head during a call, or not carried by the user at all. Similarly, smartwatches can be worn on either wrist or not worn at all, while earbuds can be worn, placed in a charging case stored in either pocket, or not carried by the user." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 555, + 296, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 555, + 296, + 621 + ], + "spans": [ + { + "bbox": [ + 50, + 555, + 296, + 621 + ], + "type": "text", + "content": "Following IMUPoser [28], we consider 24 plausible device-location combinations across five body locations: right pocket, left pocket, right wrist, left wrist, and head. These combinations cover the various ways users might carry or wear their devices throughout the day. Regardless of the input device combination, our model expects IMU data from the five predefined body locations." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 622, + 296, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 622, + 296, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 622, + 296, + 710 + ], + "type": "text", + "content": "The IMU signal at each location consists of acceleration (3 values) and orientation (a " + }, + { + "bbox": [ + 50, + 622, + 296, + 710 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 50, + 622, + 296, + 710 + ], + "type": "text", + "content": " rotation matrix), resulting in a total of 12 IMU values per location. Across all five locations, this yields an input vector " + }, + { + "bbox": [ + 50, + 622, + 296, + 710 + ], + "type": "inline_equation", + "content": "x \\in \\mathbb{R}^{60}" + }, + { + "bbox": [ + 50, + 622, + 296, + 710 + ], + "type": "text", + "content": ". However, since at any given time only a subset of 1-3 devices may be present, data from absent devices is masked and set to zero. This masking approach allows us to build a unified model that can handle the varying number of available devices and their changing on-body location seamlessly. This further eliminates" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 314, + 332, + 560, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 332, + 560, + 354 + ], + "spans": [ + { + "bbox": [ + 314, + 332, + 560, + 354 + ], + "type": "text", + "content": "the need for training separate models for each possible combination, making the system more practical and efficient." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 315, + 364, + 474, + 376 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 364, + 474, + 376 + ], + "spans": [ + { + "bbox": [ + 315, + 364, + 474, + 376 + ], + "type": "text", + "content": "3.2 Full-Body Pose Estimation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 379, + 560, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 379, + 560, + 479 + ], + "spans": [ + { + "bbox": [ + 313, + 379, + 560, + 479 + ], + "type": "text", + "content": "To learn a mapping from IMU input to full-body pose, we employ a data-driven, multi-stage neural network approach. 
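+
+ As a concrete reading of the Section 3.1 input just described, the sketch below assembles the 60-dimensional masked vector; the location ordering and the dictionary-based API are illustrative assumptions:
+
+ ```python
+ import torch
+
+ # Illustrative location order; the five positions come from the text, the
+ # ordering and this helper are our assumptions.
+ LOCATIONS = ["right_pocket", "left_pocket", "right_wrist", "left_wrist", "head"]
+
+ def build_imu_frame(readings):
+     """readings maps a location name to (acc: (3,), rot: (3, 3)) tensors."""
+     parts = []
+     for loc in LOCATIONS:
+         if loc in readings:
+             acc, rot = readings[loc]
+             parts.append(torch.cat([acc, rot.reshape(9)]))  # 12 values per location
+         else:
+             parts.append(torch.zeros(12))                   # absent device -> zeros
+     return torch.cat(parts)                                 # x in R^60
+ ```
+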
Specifically, our pose estimation network consists of two submodules: Joint predictor " + }, + { + "bbox": [ + 313, + 379, + 560, + 479 + ], + "type": "inline_equation", + "content": "(\\mathcal{F}^{joint})" + }, + { + "bbox": [ + 313, + 379, + 560, + 479 + ], + "type": "text", + "content": " and Rotation predictor " + }, + { + "bbox": [ + 313, + 379, + 560, + 479 + ], + "type": "inline_equation", + "content": "(\\mathcal{F}^{\\theta})" + }, + { + "bbox": [ + 313, + 379, + 560, + 479 + ], + "type": "text", + "content": ". More specifically, " + }, + { + "bbox": [ + 313, + 379, + 560, + 479 + ], + "type": "inline_equation", + "content": "\\mathcal{F}^{joint}" + }, + { + "bbox": [ + 313, + 379, + 560, + 479 + ], + "type": "text", + "content": " estimates joint positions as an intermediate task and " + }, + { + "bbox": [ + 313, + 379, + 560, + 479 + ], + "type": "inline_equation", + "content": "\\mathcal{F}^{\\theta}" + }, + { + "bbox": [ + 313, + 379, + 560, + 479 + ], + "type": "text", + "content": " solves for the joint angle orientations. Both submodules use a bidirectional LSTM (bi-LSTM), to model both spatial and temporal information [14]. We input data into both submodules in a sliding-window fashion with window length " + }, + { + "bbox": [ + 313, + 379, + 560, + 479 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 379, + 560, + 479 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 484, + 560, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 484, + 560, + 583 + ], + "spans": [ + { + "bbox": [ + 314, + 484, + 560, + 583 + ], + "type": "text", + "content": "3.2.1 Joint Pose Estimation " + }, + { + "bbox": [ + 314, + 484, + 560, + 583 + ], + "type": "inline_equation", + "content": "(\\mathcal{F}^{joint})" + }, + { + "bbox": [ + 314, + 484, + 560, + 583 + ], + "type": "text", + "content": ". This module estimates the joint positions from a sequence of IMU measurements. We explicitly estimate joint positions as an intermediate step, as it helps extract useful information from linear accelerations due to its linear correlation with joint positions [49]. The input to " + }, + { + "bbox": [ + 314, + 484, + 560, + 583 + ], + "type": "inline_equation", + "content": "\\mathcal{F}^{joint}" + }, + { + "bbox": [ + 314, + 484, + 560, + 583 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 314, + 484, + 560, + 583 + ], + "type": "inline_equation", + "content": "x^{imu}(t) = [x_{t-N}, \\ldots, x_t]" + }, + { + "bbox": [ + 314, + 484, + 560, + 583 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 314, + 484, + 560, + 583 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 314, + 484, + 560, + 583 + ], + "type": "text", + "content": " is the current time step and " + }, + { + "bbox": [ + 314, + 484, + 560, + 583 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 314, + 484, + 560, + 583 + ], + "type": "text", + "content": " is the time window length. The output are the root (pelvis) relative 3D positions of the 24 SMPL body joints [25] " + }, + { + "bbox": [ + 314, + 484, + 560, + 583 + ], + "type": "inline_equation", + "content": "\\pmb{p}(t) = [\\pmb{p}_{t-N}, \\ldots, \\pmb{p}_t] \\in \\mathbb{R}^{N \\times 72}" + }, + { + "bbox": [ + 314, + 484, + 560, + 583 + ], + "type": "text", + "content": ". 
The loss function used to train this network is:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 396, + 587, + 558, + 601 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 396, + 587, + 558, + 601 + ], + "spans": [ + { + "bbox": [ + 396, + 587, + 558, + 601 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {j o i n t}} = \\left\\| \\mathbf {p} - \\mathbf {p} _ {G T} \\right\\| _ {2} ^ {2} \\tag {1}", + "image_path": "85a597f5f89be09951d147dd3ce0870e8b95c3e0346b07bfdcafb9beef99c8b2.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 604, + 559, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 604, + 559, + 626 + ], + "spans": [ + { + "bbox": [ + 314, + 604, + 559, + 626 + ], + "type": "text", + "content": "where the subscript " + }, + { + "bbox": [ + 314, + 604, + 559, + 626 + ], + "type": "inline_equation", + "content": "GT" + }, + { + "bbox": [ + 314, + 604, + 559, + 626 + ], + "type": "text", + "content": " denotes the ground truth and " + }, + { + "bbox": [ + 314, + 604, + 559, + 626 + ], + "type": "inline_equation", + "content": "p" + }, + { + "bbox": [ + 314, + 604, + 559, + 626 + ], + "type": "text", + "content": " represents the full-body SMPL joint positions." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 632, + 560, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 632, + 560, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 632, + 560, + 710 + ], + "type": "text", + "content": "3.2.2 Joint Rotation and Body Mesh Estimation " + }, + { + "bbox": [ + 313, + 632, + 560, + 710 + ], + "type": "inline_equation", + "content": "(\\mathcal{F}^{\\theta})" + }, + { + "bbox": [ + 313, + 632, + 560, + 710 + ], + "type": "text", + "content": ". Here we employ a neural kinematic estimator to regress joint rotations from the previously estimated positions. We concatenate the joint coordinates from " + }, + { + "bbox": [ + 313, + 632, + 560, + 710 + ], + "type": "inline_equation", + "content": "\\mathcal{F}^{joint}" + }, + { + "bbox": [ + 313, + 632, + 560, + 710 + ], + "type": "text", + "content": " with IMU measurements, which serves as the input to " + }, + { + "bbox": [ + 313, + 632, + 560, + 710 + ], + "type": "inline_equation", + "content": "\\mathcal{F}^{\\theta}" + }, + { + "bbox": [ + 313, + 632, + 560, + 710 + ], + "type": "text", + "content": ". Note, while the SMPL body encodes 24 joints, only 18 are relevant from a rotation prediction perspective as the fingers, wrist and toes are independent of the on-body IMUs and" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 200, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 200, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 200, + 68 + ], + "type": "text", + "content": "UIST '24, October 13-16, 2024, Pittsburgh, PA, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 531, + 60, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 531, + 60, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 531, + 60, + 558, + 68 + ], + "type": "text", + "content": "Xu, et al." 
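+
+ A minimal sketch of the joint predictor and the Eq. (1) loss described above; the bi-LSTM width is an assumption, since the text does not specify layer sizes:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class JointPredictor(nn.Module):
+     """Sketch of F^joint: a window of masked 60-d IMU frames -> 24 joints (72-d per frame)."""
+
+     def __init__(self, imu_dim=60, hidden_dim=256, out_dim=72):  # hidden_dim assumed
+         super().__init__()
+         self.bilstm = nn.LSTM(imu_dim, hidden_dim, batch_first=True, bidirectional=True)
+         self.proj = nn.Linear(2 * hidden_dim, out_dim)
+
+     def forward(self, x_imu):        # x_imu: (batch, N, 60)
+         h, _ = self.bilstm(x_imu)
+         return self.proj(h)          # p: (batch, N, 72) root-relative positions
+
+ def l_joint(p, p_gt):
+     """Eq. (1): squared L2 error between predicted and ground-truth joints."""
+     return ((p - p_gt) ** 2).sum(dim=-1).mean()
+ ```
+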
+ }
+ ]
+ }
+ ],
+ "index": 1
+ }
+ ],
+ "page_size": [
+ 612,
+ 792
+ ],
+ "page_idx": 3
+ },
+ {
+ "para_blocks": [
+ {
+ "bbox": [
+ 50,
+ 84,
+ 294,
+ 118
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 50,
+ 84,
+ 294,
+ 118
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 50,
+ 84,
+ 294,
+ 118
+ ],
+ "type": "text",
+ "content": "are hence set to identity rotation matrices [49]. The outputs of the network are the 18 root relative joint orientations represented as 6D rotations: "
+ },
+ {
+ "bbox": [
+ 50,
+ 84,
+ 294,
+ 118
+ ],
+ "type": "inline_equation",
+ "content": "\pmb{\theta}(t) = [\pmb{\theta}_{t-N},\dots,\pmb{\theta}_t] \in \mathbb{R}^{N \times 108}"
+ },
+ {
+ "bbox": [
+ 50,
+ 84,
+ 294,
+ 118
+ ],
+ "type": "text",
+ "content": "."
+ }
+ ]
+ }
+ ],
+ "index": 2
+ },
+ {
+ "bbox": [
+ 50,
+ 118,
+ 295,
+ 194
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 50,
+ 118,
+ 295,
+ 194
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 50,
+ 118,
+ 295,
+ 194
+ ],
+ "type": "text",
+ "content": "Our joint rotation loss consists of three terms: "
+ },
+ {
+ "bbox": [
+ 50,
+ 118,
+ 295,
+ 194
+ ],
+ "type": "inline_equation",
+ "content": "\mathcal{L}_{ori}"
+ },
+ {
+ "bbox": [
+ 50,
+ 118,
+ 295,
+ 194
+ ],
+ "type": "text",
+ "content": ", "
+ },
+ {
+ "bbox": [
+ 50,
+ 118,
+ 295,
+ 194
+ ],
+ "type": "inline_equation",
+ "content": "\mathcal{L}_{pos}"
+ },
+ {
+ "bbox": [
+ 50,
+ 118,
+ 295,
+ 194
+ ],
+ "type": "text",
+ "content": ", "
+ },
+ {
+ "bbox": [
+ 50,
+ 118,
+ 295,
+ 194
+ ],
+ "type": "inline_equation",
+ "content": "\mathcal{L}_{jerk}"
+ },
+ {
+ "bbox": [
+ 50,
+ 118,
+ 295,
+ 194
+ ],
+ "type": "text",
+ "content": ". The loss term "
+ },
+ {
+ "bbox": [
+ 50,
+ 118,
+ 295,
+ 194
+ ],
+ "type": "inline_equation",
+ "content": "\mathcal{L}_{ori}"
+ },
+ {
+ "bbox": [
+ 50,
+ 118,
+ 295,
+ 194
+ ],
+ "type": "text",
+ "content": " is a standard L2 loss from the ground truth joint rotations. The term "
+ },
+ {
+ "bbox": [
+ 50,
+ 118,
+ 295,
+ 194
+ ],
+ "type": "inline_equation",
+ "content": "\mathcal{L}_{pos}"
+ },
+ {
+ "bbox": [
+ 50,
+ 118,
+ 295,
+ 194
+ ],
+ "type": "text",
+ "content": " penalizes error accumulating along the kinematic chain. Finally, "
+ },
+ {
+ "bbox": [
+ 50,
+ 118,
+ 295,
+ 194
+ ],
+ "type": "inline_equation",
+ "content": "\mathcal{L}_{jerk}"
+ },
+ {
+ "bbox": [
+ 50,
+ 118,
+ 295,
+ 194
+ ],
+ "type": "text",
+ "content": " promotes temporally smooth predictions, where "
+ },
+ {
+ "bbox": [
+ 50,
+ 118,
+ 295,
+ 194
+ ],
+ "type": "inline_equation",
+ "content": "jerk(\theta) = -\theta_{t-3} + 3\theta_{t-2} - 3\theta_{t-1} + \theta_t"
+ },
+ {
+ "bbox": [
+ 50,
+ 118,
+ 295,
+ 194
+ ],
+ "type": "text",
+ "content": " is a function that computes the jerk of a signal "
+ },
+ {
+ "bbox": [
+ 50,
+ 118,
+ 295,
+ 194
+ ],
+ "type": "inline_equation",
+ "content": "\theta"
+ },
+ {
+ "bbox": [
+ 50,
+ 118,
+ 295,
+ 194
+ ],
+ "type": "text",
+ "content": " at time step "
+ },
+ {
+ "bbox": [
+ 50,
+ 118,
+ 295,
+ 194
+ ],
+ "type": "inline_equation",
+ "content": "t"
+ },
+ {
+ "bbox": [
+ 50,
+ 118,
+ 295,
+ 194
+ ],
+ "type": "text",
+ "content": ", penalizing the deviation between neighboring frames [49]."
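+
+ Read as a third-order finite difference with coefficients (-1, +3, -3, +1), the jerk term above can be sketched as follows (shapes illustrative, helper name ours):
+
+ ```python
+ import torch
+
+ def jerk(theta):
+     """Third-order difference of a (batch, T, D) sequence -> (batch, T-3, D)."""
+     return theta[:, 3:] - 3 * theta[:, 2:-1] + 3 * theta[:, 1:-2] - theta[:, :-3]
+ ```
+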
+ }
+ ]
+ }
+ ],
+ "index": 3
+ },
+ {
+ "bbox": [
+ 60,
+ 194,
+ 295,
+ 205
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 60,
+ 194,
+ 295,
+ 205
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 60,
+ 194,
+ 295,
+ 205
+ ],
+ "type": "text",
+ "content": "Our combined joint rotation loss function can be represented as,"
+ }
+ ]
+ }
+ ],
+ "index": 4
+ },
+ {
+ "bbox": [
+ 125,
+ 209,
+ 295,
+ 223
+ ],
+ "type": "interline_equation",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 125,
+ 209,
+ 295,
+ 223
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 125,
+ 209,
+ 295,
+ 223
+ ],
+ "type": "interline_equation",
+ "content": "\mathcal{L}_{\theta} = \mathcal{L}_{ori} + \mathcal{L}_{pos} + \lambda \mathcal{L}_{jerk} \tag{2}",
+ "image_path": "ec6ffac28fee5afd7196de7805b6b0dfc2f59e8909d73362d94f468974edd810.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 5
+ },
+ {
+ "bbox": [
+ 120,
+ 224,
+ 295,
+ 239
+ ],
+ "type": "interline_equation",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 120,
+ 224,
+ 295,
+ 239
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 120,
+ 224,
+ 295,
+ 239
+ ],
+ "type": "interline_equation",
+ "content": "\mathcal{L}_{ori} = \left\| \theta - \theta_{GT} \right\|_{2}^{2} \tag{3}",
+ "image_path": "b10bd07999b484b01ea1a78ab581c9df06c52de8c54920b77d3806f92132d486.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 6
+ },
+ {
+ "bbox": [
+ 118,
+ 240,
+ 295,
+ 254
+ ],
+ "type": "interline_equation",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 118,
+ 240,
+ 295,
+ 254
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 118,
+ 240,
+ 295,
+ 254
+ ],
+ "type": "interline_equation",
+ "content": "\mathcal{L}_{pos} = \left\| \mathrm{FK}(\theta) - \mathbf{p}_{GT} \right\|_{2}^{2} \tag{4}",
+ "image_path": "1add247a60f95c26c9a84937cea737253382140a4f5f366dc8a57cffd9159d14.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 7
+ },
+ {
+ "bbox": [
+ 115,
+ 256,
+ 295,
+ 285
+ ],
+ "type": "interline_equation",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 115,
+ 256,
+ 295,
+ 285
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 115,
+ 256,
+ 295,
+ 285
+ ],
+ "type": "interline_equation",
+ "content": "\mathcal{L}_{jerk} = \sum_{t}^{T} \mathrm{jerk}(\theta) \tag{5}",
+ "image_path": "9c83b064207a227a0c4f07b1711e26559babc7e4f26de546ec70e40642ad027d.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 8
+ },
+ {
+ "bbox": [
+ 50,
+ 289,
+ 295,
+ 333
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 50,
+ 289,
+ 295,
+ 333
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 50,
+ 289,
+ 295,
+ 333
+ ],
+ "type": "text",
+ "content": "where "
+ },
+ {
+ "bbox": [
+ 50,
+ 289,
+ 295,
+ 333
+ ],
+ "type": "inline_equation",
+ "content": "FK(\cdot)"
+ },
+ {
+ "bbox": [
+ 50,
+ 289,
+ 295,
+ 333
+ ],
+ "type": "text",
+ "content": " is the forward kinematics function that computes joint coordinates from joint rotations. Given the joint rotations, the parametric SMPL body model generates a corresponding body mesh with 6890 vertices."
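+
+ Putting Eqs. (2)-(5) together as a single hedged sketch: `fk` stands in for the SMPL forward-kinematics function FK(·), lam = 1e-5 is the value quoted in Section 4.3, and the absolute value inside the jerk sum is our assumption, since Eq. (5) writes a plain sum:
+
+ ```python
+ import torch
+
+ def rotation_loss(theta, theta_gt, p_gt, fk, lam=1e-5):
+     """Sketch of Eqs. (2)-(5); `fk` is a stand-in for SMPL forward kinematics."""
+     def jerk(x):                     # third-order difference, as sketched above
+         return x[:, 3:] - 3 * x[:, 2:-1] + 3 * x[:, 1:-2] - x[:, :-3]
+     l_ori = ((theta - theta_gt) ** 2).sum(dim=-1).mean()      # Eq. (3)
+     l_pos = ((fk(theta) - p_gt) ** 2).sum(dim=-1).mean()      # Eq. (4)
+     # Absolute value assumed so positive and negative jerk cannot cancel.
+     l_jerk = jerk(theta).abs().sum()                          # Eq. (5)
+     return l_ori + l_pos + lam * l_jerk                       # Eq. (2)
+ ```
+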
+ }
+ ]
+ }
+ ],
+ "index": 9
+ },
+ {
+ "bbox": [
+ 51,
+ 342,
+ 230,
+ 354
+ ],
+ "type": "title",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 51,
+ 342,
+ 230,
+ 354
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 51,
+ 342,
+ 230,
+ 354
+ ],
+ "type": "text",
+ "content": "3.3 Global Translation Estimation"
+ }
+ ]
+ }
+ ],
+ "index": 10
+ },
+ {
+ "bbox": [
+ 50,
+ 357,
+ 295,
+ 456
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 50,
+ 357,
+ 295,
+ 456
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 50,
+ 357,
+ 295,
+ 456
+ ],
+ "type": "text",
+ "content": "Translation estimation from IMUs is challenging as they lack direct distance measurements. Moreover, IMUs are prone to noise and biases, which causes techniques such as double-integration of acceleration to rapidly accumulate errors [46]. Therefore, inspired by prior work [23, 48, 49], we estimate per-frame velocity of the root joint using two submodules: a foot-ground contact based estimator "
+ },
+ {
+ "bbox": [
+ 50,
+ 357,
+ 295,
+ 456
+ ],
+ "type": "inline_equation",
+ "content": "(v_{f})"
+ },
+ {
+ "bbox": [
+ 50,
+ 357,
+ 295,
+ 456
+ ],
+ "type": "text",
+ "content": " and a neural network based root velocity estimator "
+ },
+ {
+ "bbox": [
+ 50,
+ 357,
+ 295,
+ 456
+ ],
+ "type": "inline_equation",
+ "content": "(v_{e})"
+ },
+ {
+ "bbox": [
+ 50,
+ 357,
+ 295,
+ 456
+ ],
+ "type": "text",
+ "content": ". We fuse the output of the two submodules to obtain a final estimate of global translation."
+ }
+ ]
+ }
+ ],
+ "index": 11
+ },
+ {
+ "bbox": [
+ 50,
+ 462,
+ 295,
+ 710
+ ],
+ "type": "list",
+ "angle": 0,
+ "index": 14,
+ "blocks": [
+ {
+ "bbox": [
+ 50,
+ 462,
+ 295,
+ 627
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 50,
+ 462,
+ 295,
+ 627
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 50,
+ 462,
+ 295,
+ 627
+ ],
+ "type": "text",
+ "content": "3.3.1 Foot-Ground Contact based Root Velocity "
+ },
+ {
+ "bbox": [
+ 50,
+ 462,
+ 295,
+ 627
+ ],
+ "type": "inline_equation",
+ "content": "(v_{f})"
+ },
+ {
+ "bbox": [
+ 50,
+ 462,
+ 295,
+ 627
+ ],
+ "type": "text",
+ "content": ". Here we estimate the probability of each foot contacting the ground independently using a bi-LSTM network. The input to the model is the concatenated vector of joint positions and IMU measurements. The output of the network is the likelihood that each foot is contacting the ground, denoted as "
+ },
+ {
+ "bbox": [
+ 50,
+ 462,
+ 295,
+ 627
+ ],
+ "type": "inline_equation",
+ "content": "c_{foot} = [c_{lfoot}, c_{rfoot}] \in \mathbb{R}^2"
+ },
+ {
+ "bbox": [
+ 50,
+ 462,
+ 295,
+ 627
+ ],
+ "type": "text",
+ "content": ". The foot with the higher foot-ground contact probability is defined as the supporting foot, "
+ },
+ {
+ "bbox": [
+ 50,
+ 462,
+ 295,
+ 627
+ ],
+ "type": "inline_equation",
+ "content": "s = \max \{c_{\mathrm{lfoot}}, c_{\mathrm{rfoot}}\}"
+ },
+ {
+ "bbox": [
+ 50,
+ 462,
+ 295,
+ 627
+ ],
+ "type": "text",
+ "content": ". The root velocity, "
+ },
+ {
+ "bbox": [
+ 50,
+ 462,
+ 295,
+ 627
+ ],
+ "type": "inline_equation",
+ "content": "v_{f}(t) \in \mathbb{R}^{3}"
+ },
+ {
+ "bbox": [
+ 50,
+ 462,
+ 295,
+ 627
+ ],
+ "type": "text",
+ "content": ", is then computed as the coordinate difference of the supporting foot between consecutive frames. This approach helps capture natural body motions, as movement is significantly influenced by the supporting foot's dynamics [37]. For example, when walking, the body's movement is propelled forward and stabilized by the foot contacting the ground. 
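+
+ A sketch of the supporting-foot velocity just described, continued below; the function name is hypothetical and the sign convention is our reading of the method:
+
+ ```python
+ import torch
+
+ def supporting_foot_velocity(c_foot, foot_prev, foot_cur):
+     """Sketch of v_f. c_foot: (2,) contact probabilities [left, right];
+     foot_prev/foot_cur: (2, 3) foot positions in consecutive frames."""
+     s = int(torch.argmax(c_foot))        # supporting foot = higher probability
+     delta = foot_cur[s] - foot_prev[s]   # per-frame coordinate difference
+     # Sign convention assumed: a ground-contacting foot is stationary in the
+     # world, so the root moves opposite to the foot's apparent motion.
+     return -delta
+ ```
+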
The network is trained using binary cross-entropy loss." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "type": "text", + "content": "3.3.2 Neural Network based Root Velocity " + }, + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "type": "inline_equation", + "content": "(v_{e})" + }, + { + "bbox": [ + 50, + 632, + 295, + 710 + ], + "type": "text", + "content": ". While the supporting foot contact based method yields plausible human movement, it inherently fails when both feet are not contacting the ground (e.g., when running or jumping). To accommodate such cases, we estimate per-frame root velocity directly using a neural network. We again use the predicted joint coordinates and IMU measurements as input. Compared to previous submodules that use a bi-LSTM for" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 313, + 84, + 560, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 84, + 560, + 129 + ], + "spans": [ + { + "bbox": [ + 313, + 84, + 560, + 129 + ], + "type": "text", + "content": "prediction, this module uses a unidirectional LSTM due to its capacity to capture longer historical context. The output is per-frame root velocity, denoted as " + }, + { + "bbox": [ + 313, + 84, + 560, + 129 + ], + "type": "inline_equation", + "content": "v_{e}(t) \\in \\mathbb{R}^{3}" + }, + { + "bbox": [ + 313, + 84, + 560, + 129 + ], + "type": "text", + "content": ". The network is trained using a cumulative L2 loss [49]." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 314, + 135, + 560, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 135, + 560, + 279 + ], + "spans": [ + { + "bbox": [ + 314, + 135, + 560, + 279 + ], + "type": "text", + "content": "3.3.3 Module Fusion. Both modules offer different trade-offs in terms of predicting translation. Supporting foot provides more realistic estimates by leveraging human kinematics but fails when both feet are off the ground. On the other hand, directly estimating root velocity is more general but is highly prone to unnatural movements such as foot sliding [52]. To achieve the benefits of both, we adopt the heuristic-based fusion approach, inspired by TransPose [49]. In summary, when the foot contact " + }, + { + "bbox": [ + 314, + 135, + 560, + 279 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 314, + 135, + 560, + 279 + ], + "type": "text", + "content": " is higher than an upper-threshold " + }, + { + "bbox": [ + 314, + 135, + 560, + 279 + ], + "type": "inline_equation", + "content": "\\overline{q}" + }, + { + "bbox": [ + 314, + 135, + 560, + 279 + ], + "type": "text", + "content": ", we are confident of ground contact by a foot and hence we rely on " + }, + { + "bbox": [ + 314, + 135, + 560, + 279 + ], + "type": "inline_equation", + "content": "(v_{f})" + }, + { + "bbox": [ + 314, + 135, + 560, + 279 + ], + "type": "text", + "content": " for translation estimation. 
When the foot contact is below a lower-threshold, "
+ },
+ {
+ "bbox": [
+ 314,
+ 135,
+ 560,
+ 279
+ ],
+ "type": "inline_equation",
+ "content": "\underline{q}"
+ },
+ {
+ "bbox": [
+ 314,
+ 135,
+ 560,
+ 279
+ ],
+ "type": "text",
+ "content": ", we rely on "
+ },
+ {
+ "bbox": [
+ 314,
+ 135,
+ 560,
+ 279
+ ],
+ "type": "inline_equation",
+ "content": "(v_{e})"
+ },
+ {
+ "bbox": [
+ 314,
+ 135,
+ 560,
+ 279
+ ],
+ "type": "text",
+ "content": ". For intermediate probabilities, we fuse both velocity estimations using a weighted sum to output the final global velocity estimate "
+ },
+ {
+ "bbox": [
+ 314,
+ 135,
+ 560,
+ 279
+ ],
+ "type": "inline_equation",
+ "content": "v"
+ },
+ {
+ "bbox": [
+ 314,
+ 135,
+ 560,
+ 279
+ ],
+ "type": "text",
+ "content": ":"
+ }
+ ]
+ }
+ ],
+ "index": 16
+ },
+ {
+ "bbox": [
+ 393,
+ 283,
+ 559,
+ 309
+ ],
+ "type": "interline_equation",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 393,
+ 283,
+ 559,
+ 309
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 393,
+ 283,
+ 559,
+ 309
+ ],
+ "type": "interline_equation",
+ "content": "v = \frac{q - \bar{q}}{\underline{q} - \bar{q}} v_{e} + \frac{q - \underline{q}}{\bar{q} - \underline{q}} v_{f} \tag{6}",
+ "image_path": "012c9012e1a82bf3c97763379fc6abf4fc5f128e7f2716153832ccdd13538073.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 17
+ },
+ {
+ "bbox": [
+ 314,
+ 315,
+ 529,
+ 327
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 314,
+ 315,
+ 529,
+ 327
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 314,
+ 315,
+ 529,
+ 327
+ ],
+ "type": "text",
+ "content": "Following previous work [49], we use "
+ },
+ {
+ "bbox": [
+ 314,
+ 315,
+ 529,
+ 327
+ ],
+ "type": "inline_equation",
+ "content": "\underline{q} = 0.5"
+ },
+ {
+ "bbox": [
+ 314,
+ 315,
+ 529,
+ 327
+ ],
+ "type": "text",
+ "content": " and "
+ },
+ {
+ "bbox": [
+ 314,
+ 315,
+ 529,
+ 327
+ ],
+ "type": "inline_equation",
+ "content": "\overline{q} = 0.9"
+ },
+ {
+ "bbox": [
+ 314,
+ 315,
+ 529,
+ 327
+ ],
+ "type": "text",
+ "content": "."
+ }
+ ]
+ }
+ ],
+ "index": 18
+ },
+ {
+ "bbox": [
+ 315,
+ 337,
+ 477,
+ 349
+ ],
+ "type": "title",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 315,
+ 337,
+ 477,
+ 349
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 315,
+ 337,
+ 477,
+ 349
+ ],
+ "type": "text",
+ "content": "3.4 Physics-Aware Refinement"
+ }
+ ]
+ }
+ ],
+ "index": 19
+ },
+ {
+ "bbox": [
+ 313,
+ 351,
+ 559,
+ 549
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 313,
+ 351,
+ 559,
+ 549
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 313,
+ 351,
+ 559,
+ 549
+ ],
+ "type": "text",
+ "content": "Our pose and translation estimation networks output the user's global pose based on a history of IMU measurements. When trained on sufficiently large amounts of data, the full-body pose estimation and global translation estimation neural networks learn the human motion manifold and produce realistic poses. However, despite the best modeling efforts, the outputs may still contain inter-mesh penetration, temporal artifacts such as jitter, foot-floor penetration and foot skating. To address these issues, we add an off-the-shelf physics motion optimizer [48]. The physics optimizer uses two proportional derivative (PD) controllers to compute the desired acceleration of the simulated character that best reproduces the estimated pose while satisfying physical constraints, such as the equation of motion [12]. 
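+
+ The fusion rule of Eq. (6) above, with the thresholds just quoted, reduces to a clamped linear blend; a minimal sketch (the text describing the optimizer continues below):
+
+ ```python
+ def fuse_velocity(q, v_e, v_f, q_lo=0.5, q_hi=0.9):
+     """Eq. (6) with the quoted thresholds; q is the foot-contact probability."""
+     if q >= q_hi:                    # confident ground contact
+         return v_f
+     if q <= q_lo:                    # no reliable contact
+         return v_e
+     w = (q - q_lo) / (q_hi - q_lo)   # linear blend in between
+     return w * v_f + (1.0 - w) * v_e
+ ```
+
+ Algebraically this matches Eq. (6): the v_f weight is (q - q̲)/(q̄ - q̲) and the v_e weight is its complement.
+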
The inputs to the physics optimizer are the estimated joint angles " + }, + { + "bbox": [ + 313, + 351, + 559, + 549 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 313, + 351, + 559, + 549 + ], + "type": "text", + "content": ", the foot-ground contact probabilities " + }, + { + "bbox": [ + 313, + 351, + 559, + 549 + ], + "type": "inline_equation", + "content": "c_{foot}" + }, + { + "bbox": [ + 313, + 351, + 559, + 549 + ], + "type": "text", + "content": ", and the neural network based root velocity " + }, + { + "bbox": [ + 313, + 351, + 559, + 549 + ], + "type": "inline_equation", + "content": "v_{e}" + }, + { + "bbox": [ + 313, + 351, + 559, + 549 + ], + "type": "text", + "content": ". The outputs are the optimized joint angles and global translation with reduced jitter and foot-ground penetration (Figure 4). For a detailed overview of the physics optimizer, we refer readers to PIP [48]." + } + ] + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 317, + 567, + 559, + 674 + ], + "blocks": [ + { + "bbox": [ + 317, + 567, + 559, + 674 + ], + "lines": [ + { + "bbox": [ + 317, + 567, + 559, + 674 + ], + "spans": [ + { + "bbox": [ + 317, + 567, + 559, + 674 + ], + "type": "image", + "image_path": "a5fdf6041545d3658902b169cb999bb2423047dfed789e142ee272c001e87cc9.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 314, + 684, + 559, + 707 + ], + "lines": [ + { + "bbox": [ + 314, + 684, + 559, + 707 + ], + "spans": [ + { + "bbox": [ + 314, + 684, + 559, + 707 + ], + "type": "text", + "content": "Figure 4: Demonstration of the physics optimizer's ability to reduce foot-ground penetration." + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_caption" + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 397, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 397, + 68 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 397, + 68 + ], + "type": "text", + "content": "MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 410, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 410, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 410, + 60, + 559, + 69 + ], + "type": "text", + "content": "UIST'24,October 13-16,2024,Pittsburgh,PA,USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 83, + 178, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 83, + 178, + 95 + ], + "spans": [ + { + "bbox": [ + 50, + 83, + 178, + 95 + ], + "type": "text", + "content": "3.5 Real-time Inference" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 99, + 294, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 99, + 294, + 153 + ], + "spans": [ + { + "bbox": [ + 50, + 99, + 294, + 153 + ], + "type": "text", + "content": "We implement proof-of-concept applications in iOS, using an Apple iPhone 15 Pro, Apple Watch Series 9 and Apple AirPods Pro. The iPhone, Apple Watch and AirPods sample IMU data at 60, 60 and " + }, + { + "bbox": [ + 50, + 99, + 294, + 153 + ], + "type": "inline_equation", + "content": "25\\mathrm{Hz}" + }, + { + "bbox": [ + 50, + 99, + 294, + 153 + ], + "type": "text", + "content": " respectively. 
For uniformity, we convert all the IMU data to " + }, + { + "bbox": [ + 50, + 99, + 294, + 153 + ], + "type": "inline_equation", + "content": "60\\mathrm{Hz}" + }, + { + "bbox": [ + 50, + 99, + 294, + 153 + ], + "type": "text", + "content": " by upsampling the AirPods." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 153, + 294, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 153, + 294, + 262 + ], + "spans": [ + { + "bbox": [ + 50, + 153, + 294, + 262 + ], + "type": "text", + "content": "We employ the active device selection strategy proposed by IMUPoser [28], wherein the UWB and inertial data is used to track the active devices and their on-body locations. For initial prototyping, the Apple Watch and AirPods communicate over Bluetooth to the iPhone, which streams data to a MacBook Air 2022 via socket. Post connection, a small calibration step is performed to align the IMU measurements with the training data, similar to prior work [14, 28, 49]. Following the setup, data is streamed to the laptop for pre-processing, inference and then relayed to Unity applications for visualization." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 262, + 294, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 262, + 294, + 319 + ], + "spans": [ + { + "bbox": [ + 50, + 262, + 294, + 319 + ], + "type": "text", + "content": "To further prototype an on-device edge model, we convert our trained PyTorch model into CoreML with mixed precision quantization and evaluate its performance. On an iPhone 15 Pro, our model incurs " + }, + { + "bbox": [ + 50, + 262, + 294, + 319 + ], + "type": "inline_equation", + "content": "\\sim 14\\mathrm{ms}" + }, + { + "bbox": [ + 50, + 262, + 294, + 319 + ], + "type": "text", + "content": " model inference time running at " + }, + { + "bbox": [ + 50, + 262, + 294, + 319 + ], + "type": "inline_equation", + "content": "60\\mathrm{Hz}" + }, + { + "bbox": [ + 50, + 262, + 294, + 319 + ], + "type": "text", + "content": ", capped by input IMU sampling rate." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 327, + 288, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 327, + 288, + 338 + ], + "spans": [ + { + "bbox": [ + 50, + 327, + 288, + 338 + ], + "type": "text", + "content": "4 DATA SYNTHESIS AND MODEL TRAINING" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 342, + 295, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 342, + 295, + 386 + ], + "spans": [ + { + "bbox": [ + 50, + 342, + 295, + 386 + ], + "type": "text", + "content": "Model training requires a large collection of synchronized IMU measurements and corresponding SMPL body poses. We leverage the AMASS [26] MoCap dataset, which provides an extensive collection of such data(~40 hours), including translation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 395, + 211, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 395, + 211, + 407 + ], + "spans": [ + { + "bbox": [ + 50, + 395, + 211, + 407 + ], + "type": "text", + "content": "4.1 Full-Body Pose Estimation" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 409, + 295, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 409, + 295, + 519 + ], + "spans": [ + { + "bbox": [ + 50, + 409, + 295, + 519 + ], + "type": "text", + "content": "Our models expect IMU measurements as input. 
We synthesize IMU data following the approach proposed in DIP [14]. In summary, we place virtual sensors on the corresponding SMPL mesh vertices (left and right wrists, left and right pockets, and the head) and obtain joint rotations via limb orientations, while acceleration values are computed using finite differences. During training, we scale down the acceleration by a factor of " + }, + { + "bbox": [ + 50, + 409, + 295, + 519 + ], + "type": "inline_equation", + "content": "30m / s^2" + }, + { + "bbox": [ + 50, + 409, + 295, + 519 + ], + "type": "text", + "content": ", such that its values are on a similar scale to orientations, for better learning. Of note, we do not normalize our IMU measurements to a root joint (e.g., the pelvis), as the number of available devices can vary." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 529, + 230, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 529, + 230, + 540 + ], + "spans": [ + { + "bbox": [ + 50, + 529, + 230, + 540 + ], + "type": "text", + "content": "4.2 Global Translation Estimation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 543, + 295, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 543, + 295, + 663 + ], + "spans": [ + { + "bbox": [ + 50, + 543, + 295, + 663 + ], + "type": "text", + "content": "The translation estimation networks require (1) binary labels for foot-ground contact states and (2) per-frame root velocity values. To generate foot-ground contact states, we assume that a foot in contact with the ground displays very little movement between frames. Therefore, when the movement of one foot between consecutive frames is less than a threshold " + }, + { + "bbox": [ + 50, + 543, + 295, + 663 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 50, + 543, + 295, + 663 + ], + "type": "text", + "content": ", then we consider it to be contacting the ground. We set " + }, + { + "bbox": [ + 50, + 543, + 295, + 663 + ], + "type": "inline_equation", + "content": "u = 0.008" + }, + { + "bbox": [ + 50, + 543, + 295, + 663 + ], + "type": "text", + "content": ", following previous work [49]. To train " + }, + { + "bbox": [ + 50, + 543, + 295, + 663 + ], + "type": "inline_equation", + "content": "v_{e}" + }, + { + "bbox": [ + 50, + 543, + 295, + 663 + ], + "type": "text", + "content": ", we require per-frame root velocities. Since the AMASS dataset provides root position data, we can compute root velocities as the coordinate difference of the root position between consecutive frames." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 673, + 227, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 673, + 227, + 685 + ], + "spans": [ + { + "bbox": [ + 50, + 673, + 227, + 685 + ], + "type": "text", + "content": "4.3 Training Setup and Procedure" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 50, + 687, + 294, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 687, + 294, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 687, + 294, + 710 + ], + "type": "text", + "content": "We train our models on a NVIDIA A40 GPU, which takes roughly a day for all modules and device-combinations. 
In total, our model has" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 84, + 559, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 84, + 559, + 129 + ], + "spans": [ + { + "bbox": [ + 313, + 84, + 559, + 129 + ], + "type": "inline_equation", + "content": "\\sim 6.7M" + }, + { + "bbox": [ + 313, + 84, + 559, + 129 + ], + "type": "text", + "content": " trainable parameters. Each module is trained separately using a batch size of 256 and the Adam optimizer [21] with a learning rate of " + }, + { + "bbox": [ + 313, + 84, + 559, + 129 + ], + "type": "inline_equation", + "content": "\\mathrm{lr} = 10^{-3}" + }, + { + "bbox": [ + 313, + 84, + 559, + 129 + ], + "type": "text", + "content": " for 80 epochs. We also apply a gradient clipping with norm of 1, to prevent the gradients from exploding." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 129, + 559, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 129, + 559, + 174 + ], + "spans": [ + { + "bbox": [ + 313, + 129, + 559, + 174 + ], + "type": "text", + "content": "During training of " + }, + { + "bbox": [ + 313, + 129, + 559, + 174 + ], + "type": "inline_equation", + "content": "\\mathcal{F}^{\\theta}" + }, + { + "bbox": [ + 313, + 129, + 559, + 174 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 129, + 559, + 174 + ], + "type": "inline_equation", + "content": "v_{e}" + }, + { + "bbox": [ + 313, + 129, + 559, + 174 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 313, + 129, + 559, + 174 + ], + "type": "inline_equation", + "content": "v_{f}" + }, + { + "bbox": [ + 313, + 129, + 559, + 174 + ], + "type": "text", + "content": ", we add Gaussian noise with " + }, + { + "bbox": [ + 313, + 129, + 559, + 174 + ], + "type": "inline_equation", + "content": "\\sigma = 0.04" + }, + { + "bbox": [ + 313, + 129, + 559, + 174 + ], + "type": "text", + "content": " to the joint positions to prevent overfitting and deal with prediction errors from " + }, + { + "bbox": [ + 313, + 129, + 559, + 174 + ], + "type": "inline_equation", + "content": "\\mathcal{F}^{joint}" + }, + { + "bbox": [ + 313, + 129, + 559, + 174 + ], + "type": "text", + "content": ". We empirically set " + }, + { + "bbox": [ + 313, + 129, + 559, + 174 + ], + "type": "inline_equation", + "content": "\\lambda = 10^{-5}" + }, + { + "bbox": [ + 313, + 129, + 559, + 174 + ], + "type": "text", + "content": " when training " + }, + { + "bbox": [ + 313, + 129, + 559, + 174 + ], + "type": "inline_equation", + "content": "\\mathcal{F}^{\\theta}" + }, + { + "bbox": [ + 313, + 129, + 559, + 174 + ], + "type": "text", + "content": ", to encourage temporally smooth predictions." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 314, + 183, + 404, + 194 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 183, + 404, + 194 + ], + "spans": [ + { + "bbox": [ + 314, + 183, + 404, + 194 + ], + "type": "text", + "content": "5 EVALUATION" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 198, + 559, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 198, + 559, + 243 + ], + "spans": [ + { + "bbox": [ + 313, + 198, + 559, + 243 + ], + "type": "text", + "content": "We systematically isolate and analyze the efficacy of MobilePoser across different datasets, evaluation metrics and protocols. 
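For readers who want the Section 4.3 recipe in one place before the results, here is a schematic PyTorch sketch: Adam at lr = 10⁻³, 80 epochs, gradient-norm clipping at 1, Gaussian input noise with σ = 0.04 on joint positions, and a λ = 10⁻⁵ smoothness term. The module, data loader, MSE objective, and the exact form of the smoothness penalty are placeholders and assumptions, not the released code; the batch size of 256 is assumed to be set in the DataLoader.

```python
import torch
from torch import nn

SMOOTH_LAMBDA = 1e-5  # Section 4.3 weight for temporally smooth predictions

def smoothness_term(pred):
    """One plausible smoothness penalty: squared frame-to-frame change.
    pred: (B, T, D) module output with time along dim 1 (assumed layout)."""
    return (pred[:, 1:] - pred[:, :-1]).pow(2).mean()

def train_module(model: nn.Module, loader, epochs=80, lr=1e-3, noise_std=0.04):
    """Per-module training loop following the recipe described above."""
    opt = torch.optim.Adam(model.parameters(), lr=lr)
    mse = nn.MSELoss()
    for _ in range(epochs):
        for joints, target in loader:
            # Gaussian noise on joint-position inputs mimics upstream errors
            noisy = joints + noise_std * torch.randn_like(joints)
            pred = model(noisy)
            loss = mse(pred, target) + SMOOTH_LAMBDA * smoothness_term(pred)
            opt.zero_grad()
            loss.backward()
            # clip gradients at norm 1 to prevent them from exploding
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
            opt.step()
```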
We show both qualitative and quantitative results, and also run ablation studies to evaluate our translation estimation design choices." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 315, + 252, + 384, + 263 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 252, + 384, + 263 + ], + "spans": [ + { + "bbox": [ + 315, + 252, + 384, + 263 + ], + "type": "text", + "content": "5.1 Datasets" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 314, + 266, + 559, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 266, + 559, + 289 + ], + "spans": [ + { + "bbox": [ + 314, + 266, + 559, + 289 + ], + "type": "text", + "content": "We evaluate MobilePoser on three real-world, inertial datasets, summarized in Table 2:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 331, + 292, + 564, + 433 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 331, + 292, + 559, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 292, + 559, + 346 + ], + "spans": [ + { + "bbox": [ + 331, + 292, + 559, + 346 + ], + "type": "text", + "content": "- DIP-IMU [14] contains data from 10 participants, collected using commercial-grade Xsens [45] IMUs at " + }, + { + "bbox": [ + 331, + 292, + 559, + 346 + ], + "type": "inline_equation", + "content": "60\\mathrm{Hz}" + }, + { + "bbox": [ + 331, + 292, + 559, + 346 + ], + "type": "text", + "content": ". It includes a rich variety of activities such as arm raises, stretches, lunges, squats, and punches. However, DIP-IMU does not contain global translation data." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 331, + 346, + 559, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 346, + 559, + 389 + ], + "spans": [ + { + "bbox": [ + 331, + 346, + 559, + 389 + ], + "type": "text", + "content": "- TotalCapture [40] provides real IMU measurements with ground-truth pose and translation, captured using commercial Xsens IMUs at " + }, + { + "bbox": [ + 331, + 346, + 559, + 389 + ], + "type": "inline_equation", + "content": "60\\mathrm{Hz}" + }, + { + "bbox": [ + 331, + 346, + 559, + 389 + ], + "type": "text", + "content": ". Following PIP [48], we re-calibrate the acceleration measurements to account for constant bias." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 331, + 390, + 564, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 390, + 564, + 433 + ], + "spans": [ + { + "bbox": [ + 331, + 390, + 564, + 433 + ], + "type": "text", + "content": "- IMUPoser [28] is collected from 10 participants using consumer-grade devices: an iPhone 11 Pro, Apple Watch Series 6, and AirPods, at " + }, + { + "bbox": [ + 331, + 390, + 564, + 433 + ], + "type": "inline_equation", + "content": "25\\mathrm{Hz}" + }, + { + "bbox": [ + 331, + 390, + 564, + 433 + ], + "type": "text", + "content": ". It provides ground-truth pose and global translation data." 
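Because the IMUPoser recordings arrive at 25 Hz while the other datasets and our models run at 60 Hz, lower-rate streams are upsampled, as noted earlier. A plausible per-channel upsampling sketch is below; the paper does not specify its interpolation scheme, and orientation channels would need spherical (slerp) rather than linear interpolation.

```python
import numpy as np

def resample_to_60hz(signal, src_hz):
    """Linearly interpolate an IMU channel stream to 60 Hz, e.g. for the
    25 Hz IMUPoser recordings. Suitable for acceleration channels only;
    rotations should be interpolated on the quaternion sphere instead.

    signal: (T, C) samples at src_hz. Returns (T', C) resampled at 60 Hz.
    """
    t_src = np.arange(signal.shape[0]) / src_hz
    t_dst = np.arange(0.0, t_src[-1], 1.0 / 60.0)
    return np.stack(
        [np.interp(t_dst, t_src, signal[:, c]) for c in range(signal.shape[1])],
        axis=1,
    )
```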
+ } + ] + } + ], + "index": 22 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 447, + 474, + 460 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 447, + 474, + 460 + ], + "spans": [ + { + "bbox": [ + 315, + 447, + 474, + 460 + ], + "type": "text", + "content": "5.2 Full-Body Pose Estimation" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 314, + 462, + 559, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 462, + 559, + 484 + ], + "spans": [ + { + "bbox": [ + 314, + 462, + 559, + 484 + ], + "type": "text", + "content": "5.2.1 Evaluation Metrics. Like prior work, we use the following evaluation metrics for pose estimation (lower is better for all):" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 331, + 487, + 559, + 596 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 331, + 487, + 559, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 487, + 559, + 509 + ], + "spans": [ + { + "bbox": [ + 331, + 487, + 559, + 509 + ], + "type": "text", + "content": "- Mean Per Joint Rotation Error (MPJRE): Measure of mean angular error across all root aligned joints in degrees " + }, + { + "bbox": [ + 331, + 487, + 559, + 509 + ], + "type": "inline_equation", + "content": "(^{\\circ})" + }, + { + "bbox": [ + 331, + 487, + 559, + 509 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 331, + 509, + 559, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 509, + 559, + 540 + ], + "spans": [ + { + "bbox": [ + 331, + 509, + 559, + 540 + ], + "type": "text", + "content": "- Mean Per Joint Position Error (MPJPE): Measure of mean Euclidean distance error across all root aligned joints in centimeters (cm)." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 331, + 541, + 559, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 541, + 559, + 574 + ], + "spans": [ + { + "bbox": [ + 331, + 541, + 559, + 574 + ], + "type": "text", + "content": "- Mean Per Joint Vertex Error (MPJVE): Measure of mean Euclidean distance error across all root aligned vertices of the SMPL body mesh in centimeters (cm)." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 331, + 574, + 559, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 574, + 559, + 596 + ], + "spans": [ + { + "bbox": [ + 331, + 574, + 559, + 596 + ], + "type": "text", + "content": "- Mean Per Joint Jitter (Jitter): Measure of mean jerk across all body joints of the predicted motion in " + }, + { + "bbox": [ + 331, + 574, + 559, + 596 + ], + "type": "inline_equation", + "content": "m / s^3" + }, + { + "bbox": [ + 331, + 574, + 559, + 596 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 314, + 598, + 559, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 598, + 559, + 621 + ], + "spans": [ + { + "bbox": [ + 314, + 598, + 559, + 621 + ], + "type": "text", + "content": "We use MPJVE as our primary metric of evaluation for ease of comparison with prior work [28]." + } + ] + } + ], + "index": 31 + }, + { + "type": "table", + "bbox": [ + 326, + 637, + 548, + 683 + ], + "blocks": [ + { + "bbox": [ + 326, + 637, + 548, + 683 + ], + "lines": [ + { + "bbox": [ + 326, + 637, + 548, + 683 + ], + "spans": [ + { + "bbox": [ + 326, + 637, + 548, + 683 + ], + "type": "table", + "html": "
<table><tr><td>Dataset</td><td>Capture Device</td><td>Translation</td><td>Data FPS</td></tr>
<tr><td>DIP-IMU</td><td>Commercial</td><td>×</td><td>60 Hz</td></tr>
<tr><td>TotalCapture</td><td>Commercial</td><td>✓</td><td>60 Hz</td></tr>
<tr><td>IMUPoser</td><td>Consumer</td><td>✓</td><td>25 Hz</td></tr></table>
", + "image_path": "99ed87bf7a0cfd7d8516898be6c0c8c9a3be2623360a9c68576d7a576fcbccca.jpg" + } + ] + } + ], + "index": 32, + "angle": 0, + "type": "table_body" + } + ], + "index": 32 + }, + { + "bbox": [ + 315, + 684, + 559, + 693 + ], + "lines": [ + { + "bbox": [ + 315, + 684, + 559, + 693 + ], + "spans": [ + { + "bbox": [ + 315, + 684, + 559, + 693 + ], + "type": "text", + "content": "Table 2: Real-world IMU datasets for MobilePoser Evaluation." + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 200, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 200, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 200, + 69 + ], + "type": "text", + "content": "UIST'24,October 13-16,2024,Pittsburgh,PA,USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 531, + 60, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 531, + 60, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 531, + 60, + 558, + 68 + ], + "type": "text", + "content": "Xu, et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 66, + 83, + 279, + 228 + ], + "blocks": [ + { + "bbox": [ + 66, + 83, + 279, + 228 + ], + "lines": [ + { + "bbox": [ + 66, + 83, + 279, + 228 + ], + "spans": [ + { + "bbox": [ + 66, + 83, + 279, + 228 + ], + "type": "image", + "image_path": "ede03b447a6f857c0266e6eff4f01b390fd362031a467d8cdaa8a83ca3d76bf3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 239, + 295, + 274 + ], + "lines": [ + { + "bbox": [ + 50, + 239, + 295, + 274 + ], + "spans": [ + { + "bbox": [ + 50, + 239, + 295, + 274 + ], + "type": "text", + "content": "Figure 5: Comparison of MobilePoser's Full-Body Pose Estimation Error across different Evaluation Protocols on the DIP-IMU, IMUPoser and TotalCapture dataset respectively." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 293, + 295, + 326 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 293, + 295, + 326 + ], + "spans": [ + { + "bbox": [ + 50, + 293, + 295, + 326 + ], + "type": "text", + "content": "5.2.2 Evaluation Protocol. We outline three evaluation protocols for training and fine-tuning to evaluate MobilePoser's efficacy across different data sources and noise profiles." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 328, + 295, + 437 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 67, + 328, + 295, + 349 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 328, + 295, + 349 + ], + "spans": [ + { + "bbox": [ + 67, + 328, + 295, + 349 + ], + "type": "text", + "content": "- Base Model: We train our model on the synthetic data generated on the AMASS dataset." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 350, + 294, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 350, + 294, + 393 + ], + "spans": [ + { + "bbox": [ + 67, + 350, + 294, + 393 + ], + "type": "text", + "content": "- Finetune DIP-IMU: Like prior work, we train on AMASS and then fine-tune on 8 DIP-IMU participants. The 2 holdout participants are used for testing the Finetune DIP-IMU model on the DIP-IMU dataset." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 395, + 294, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 395, + 294, + 437 + ], + "spans": [ + { + "bbox": [ + 67, + 395, + 294, + 437 + ], + "type": "text", + "content": "- Finetune IMUPoser: We train on AMASS and fine-tune on the first 8 IMUPoser participants. The 2 holdout participants are used for testing the Finetune IMUPoser model on the IMUPoser dataset." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 50, + 445, + 295, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 445, + 295, + 544 + ], + "spans": [ + { + "bbox": [ + 50, + 445, + 295, + 544 + ], + "type": "text", + "content": "5.2.3 Accuracy across Datasets. Figure 5 shows our full-body pose estimation accuracy for all three protocols across the three datasets listed in Section 5.1. Averaged across all three datasets, the MPJVE for the Base Model, Finetune DIP-IMU and Finetune IMUPoser protocols are 11.89, 11.73 and " + }, + { + "bbox": [ + 50, + 445, + 295, + 544 + ], + "type": "inline_equation", + "content": "11.33\\mathrm{cm}" + }, + { + "bbox": [ + 50, + 445, + 295, + 544 + ], + "type": "text", + "content": " respectively. It is interesting to note that the addition of commercial-grade IMU data (Finetune DIP-IMU) only improves accuracy by " + }, + { + "bbox": [ + 50, + 445, + 295, + 544 + ], + "type": "inline_equation", + "content": "1.3\\%" + }, + { + "bbox": [ + 50, + 445, + 295, + 544 + ], + "type": "text", + "content": " over the base model, while the addition of noisy IMU data from consumer devices (Finetune IMUPoser) results in a bigger improvement of " + }, + { + "bbox": [ + 50, + 445, + 295, + 544 + ], + "type": "inline_equation", + "content": "4.7\\%" + }, + { + "bbox": [ + 50, + 445, + 295, + 544 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 550, + 295, + 710 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 50, + 550, + 295, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 550, + 295, + 649 + ], + "spans": [ + { + "bbox": [ + 50, + 550, + 295, + 649 + ], + "type": "text", + "content": "5.2.4 Accuracy across Activities. We further analyze results on different activities on the IMUPoser dataset, as it provides activity label meta-data. MobilePoser's accuracy generalizes across most everyday activity contexts: the error (MPJVE) for locomotion is 8.2 cm (walking 7.6 cm, jogging 8.8 cm), exercises is 10 cm (kicking: 7.5 cm, jumping jacks: 11.1 cm, boxing: 11.5 cm), sitting is 11.5 cm and freestyle motions such as tennis and basketball are 9.1 cm and 11.7 cm respectively. The accuracy degrades for postures with the user lying/facing down, e.g. push-ups have higher error of 16.1 cm." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 654, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 654, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 654, + 295, + 710 + ], + "type": "text", + "content": "5.2.5 Comparison with prior work. To aid in direct comparison with prior work [14, 28, 48, 49], we now make use of the Finetune DIP-IMU evaluation protocol, that is training a base model on the synthetic IMU data from AMASS and fine-tuning it on the 8 participants from DIP-IMU dataset. 
Tables 1 and 3 offer a quantitative" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "type": "table", + "bbox": [ + 331, + 81, + 543, + 150 + ], + "blocks": [ + { + "bbox": [ + 331, + 81, + 543, + 150 + ], + "lines": [ + { + "bbox": [ + 331, + 81, + 543, + 150 + ], + "spans": [ + { + "bbox": [ + 331, + 81, + 543, + 150 + ], + "type": "table", + "html": "
<table><tr><td>System</td><td># Inst. Joints</td><td>MPJRE</td><td>MPJVE</td><td>Jitter</td></tr>
<tr><td>DIP</td><td>6</td><td>17.2°</td><td>11.2</td><td>3.62</td></tr>
<tr><td>TransPose</td><td>6</td><td>12.8°</td><td>7.4</td><td>0.95</td></tr>
<tr><td>PIP</td><td>6</td><td>12.1°</td><td>6.5</td><td>0.20</td></tr>
<tr><td>IMUPoser</td><td>1-3</td><td>25.6°</td><td>15.4</td><td>1.30</td></tr>
<tr><td>MobilePoser</td><td>1-3</td><td>23.7°</td><td>12.6</td><td>0.55</td></tr></table>
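For reference, the Section 5.2.1 metrics tabulated above can be computed roughly as follows. This is a minimal NumPy sketch in which root alignment is simplified to subtracting the root joint position; the root index and the jitter scaling convention are assumptions, and reported jitter scales vary across papers.

```python
import numpy as np

def mpjpe_cm(pred, gt):
    """MPJPE: mean per-joint position error (cm) after aligning both poses
    at the root joint (assumed to be index 0). pred, gt: (T, J, 3) joint
    positions in meters. MPJVE is the same computation applied to the
    SMPL mesh vertices instead of the joints."""
    err = (pred - pred[:, :1]) - (gt - gt[:, :1])
    return 100.0 * np.linalg.norm(err, axis=-1).mean()

def jitter(pred, fps=60):
    """Jitter: mean jerk (third temporal difference of position) of the
    predicted motion in m/s^3. pred: (T, J, 3) joint positions in meters."""
    jerk = np.diff(pred, n=3, axis=0) * fps**3
    return np.linalg.norm(jerk, axis=-1).mean()
```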
", + "image_path": "f1aa1fb87ee39eb83a3a850d85c7930c38f8b9d2834c06689b08d09eb640adb7.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 151, + 560, + 171 + ], + "lines": [ + { + "bbox": [ + 315, + 151, + 560, + 171 + ], + "spans": [ + { + "bbox": [ + 315, + 151, + 560, + 171 + ], + "type": "text", + "content": "Table 3: Comparison with key prior work on the TotalCapture dataset." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 204, + 559, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 204, + 559, + 291 + ], + "spans": [ + { + "bbox": [ + 313, + 204, + 559, + 291 + ], + "type": "text", + "content": "comparison against key prior work, evaluated on the DIP-IMU and TotalCapture, dataset respectively. Given that our system targets a very sparse configuration of IMUs (1-3), it is unsurprising that we perform worse than systems utilizing 6 IMUs, strategically placed around the body. On the DIP-IMU and TotalCapture dataset, compared to IMUPoser, which considers the same device-location combinations, we perform significantly better displaying a " + }, + { + "bbox": [ + 313, + 204, + 559, + 291 + ], + "type": "inline_equation", + "content": "12.4\\%" + }, + { + "bbox": [ + 313, + 204, + 559, + 291 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 204, + 559, + 291 + ], + "type": "inline_equation", + "content": "18.2\\%" + }, + { + "bbox": [ + 313, + 204, + 559, + 291 + ], + "type": "text", + "content": " decrease in vertex error respectively." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 291, + 559, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 291, + 559, + 423 + ], + "spans": [ + { + "bbox": [ + 313, + 291, + 559, + 423 + ], + "type": "text", + "content": "On the IMUPoser dataset, Figure 7 (A) provides a detailed breakdown of accuracy for different on-body device locations. Averaging across the 1, 2 and 3 device conditions, MobilePoser outperforms IMUPoser by " + }, + { + "bbox": [ + 313, + 291, + 559, + 423 + ], + "type": "inline_equation", + "content": "24.1\\%" + }, + { + "bbox": [ + 313, + 291, + 559, + 423 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 313, + 291, + 559, + 423 + ], + "type": "inline_equation", + "content": "14.2\\%" + }, + { + "bbox": [ + 313, + 291, + 559, + 423 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 291, + 559, + 423 + ], + "type": "inline_equation", + "content": "8.7\\%" + }, + { + "bbox": [ + 313, + 291, + 559, + 423 + ], + "type": "text", + "content": " respectively. Furthermore, Figure 7 (B) provides an accuracy breakdown for the instrumented and non-instrumented joints in comparison with IMUPoser. If a limb has an IMU placed on any part, we consider all the joints pertaining to it as instrumented joints, while the rest are marked as non-instrumented. MobilePoser is " + }, + { + "bbox": [ + 313, + 291, + 559, + 423 + ], + "type": "inline_equation", + "content": "18.1\\%" + }, + { + "bbox": [ + 313, + 291, + 559, + 423 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 291, + 559, + 423 + ], + "type": "inline_equation", + "content": "17.4\\%" + }, + { + "bbox": [ + 313, + 291, + 559, + 423 + ], + "type": "text", + "content": " better than IMUPoser for predicting instrumented and non-instrumented joints respectively. 
This can be seen in Figure 6 which depicts a visual comparison of our pose estimation with IMUPoser." + } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 334, + 437, + 541, + 674 + ], + "blocks": [ + { + "bbox": [ + 334, + 437, + 541, + 674 + ], + "lines": [ + { + "bbox": [ + 334, + 437, + 541, + 674 + ], + "spans": [ + { + "bbox": [ + 334, + 437, + 541, + 674 + ], + "type": "image", + "image_path": "41dd9e935a6e838257beb11b887e0433dc3580897df48f9c7643911446cbaaf3.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 314, + 684, + 559, + 706 + ], + "lines": [ + { + "bbox": [ + 314, + 684, + 559, + 706 + ], + "spans": [ + { + "bbox": [ + 314, + 684, + 559, + 706 + ], + "type": "text", + "content": "Figure 6: Qualitative comparisons between our method and IMUPoser on the DIP-IMU and IMUPoser dataset." + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 397, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 397, + 68 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 397, + 68 + ], + "type": "text", + "content": "MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 410, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 410, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 410, + 60, + 559, + 69 + ], + "type": "text", + "content": "UIST'24,October 13-16,2024,Pittsburgh,PA,USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 53, + 86, + 558, + 302 + ], + "blocks": [ + { + "bbox": [ + 53, + 86, + 558, + 302 + ], + "lines": [ + { + "bbox": [ + 53, + 86, + 558, + 302 + ], + "spans": [ + { + "bbox": [ + 53, + 86, + 558, + 302 + ], + "type": "image", + "image_path": "24a6ce53daf981e05ba77dd84c8966fdf8230686765f953522e471c00433664e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 315, + 558, + 338 + ], + "lines": [ + { + "bbox": [ + 50, + 315, + 558, + 338 + ], + "spans": [ + { + "bbox": [ + 50, + 315, + 558, + 338 + ], + "type": "text", + "content": "Figure 7: MPJVE comparison between IMUPoser and MobilePoser (our system) on the IMUPoser Dataset for: (A) Different on-body device combinations (B) Instrumented vs Non Instrumented joints." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 354, + 230, + 364 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 354, + 230, + 364 + ], + "spans": [ + { + "bbox": [ + 51, + 354, + 230, + 364 + ], + "type": "text", + "content": "5.3 Global Translation Estimation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 368, + 295, + 540 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 50, + 368, + 295, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 368, + 295, + 444 + ], + "spans": [ + { + "bbox": [ + 50, + 368, + 295, + 444 + ], + "type": "text", + "content": "5.3.1 Evaluation Protocol. We evaluate our Global Translation Estimation module on the TotalCapture and IMUPoser datasets, as DIP-IMU lacks translation data. 
Like prior work [48, 49], we use the Finetune DIP-IMU protocol (Section 5.2.2), that is we train on AMASS and fine-tune on 8 participants of DIP-IMU to track the Root Translation Error (Euclidean norm of the cumulative distance errors within 1 second)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 452, + 295, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 452, + 295, + 540 + ], + "spans": [ + { + "bbox": [ + 50, + 452, + 295, + 540 + ], + "type": "text", + "content": "5.3.2 Accuracy across Datasets and Body Regions. On the Total-Capture and IMUPoser dataset, our mean root translation error across all device combinations is 27.55 and " + }, + { + "bbox": [ + 50, + 452, + 295, + 540 + ], + "type": "inline_equation", + "content": "17.63\\mathrm{cm}" + }, + { + "bbox": [ + 50, + 452, + 295, + 540 + ], + "type": "text", + "content": " respectively. Interestingly, for both IMUPoser and TotalCapture datasets, we observe only a slight decrease in error when increasing the number of devices from one to two " + }, + { + "bbox": [ + 50, + 452, + 295, + 540 + ], + "type": "inline_equation", + "content": "(6.1\\%)" + }, + { + "bbox": [ + 50, + 452, + 295, + 540 + ], + "type": "text", + "content": " and no significant improvement " + }, + { + "bbox": [ + 50, + 452, + 295, + 540 + ], + "type": "inline_equation", + "content": "(4.0\\%)" + }, + { + "bbox": [ + 50, + 452, + 295, + 540 + ], + "type": "text", + "content": " when increasing from two devices to three. Analysing the error across different body regions for the single device scenario" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 50, + 558, + 294, + 654 + ], + "blocks": [ + { + "bbox": [ + 50, + 558, + 294, + 654 + ], + "lines": [ + { + "bbox": [ + 50, + 558, + 294, + 654 + ], + "spans": [ + { + "bbox": [ + 50, + 558, + 294, + 654 + ], + "type": "image", + "image_path": "36e1570002c8570b361bdc0aa62d67edbc98118850cfb1dbe5dae9fc5bd7f18b.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 662, + 295, + 707 + ], + "lines": [ + { + "bbox": [ + 50, + 662, + 295, + 707 + ], + "spans": [ + { + "bbox": [ + 50, + 662, + 295, + 707 + ], + "type": "text", + "content": "Figure 8: (A) Comparison of cumulative translation error for different instrumented joints on the IMUPoser and Total-Capture dataset. (B) Evaluation of cumulative distance errors with respect to time." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 354, + 560, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 354, + 560, + 419 + ], + "spans": [ + { + "bbox": [ + 313, + 354, + 560, + 419 + ], + "type": "text", + "content": "(Figure 8) (A), we see that a device in the pocket has a much lower error " + }, + { + "bbox": [ + 313, + 354, + 560, + 419 + ], + "type": "inline_equation", + "content": "(14.8\\mathrm{cm})" + }, + { + "bbox": [ + 313, + 354, + 560, + 419 + ], + "type": "text", + "content": " compared to that on the wrist " + }, + { + "bbox": [ + 313, + 354, + 560, + 419 + ], + "type": "inline_equation", + "content": "(25.7\\mathrm{cm})" + }, + { + "bbox": [ + 313, + 354, + 560, + 419 + ], + "type": "text", + "content": " or the head " + }, + { + "bbox": [ + 313, + 354, + 560, + 419 + ], + "type": "inline_equation", + "content": "(29.7\\mathrm{cm})" + }, + { + "bbox": [ + 313, + 354, + 560, + 419 + ], + "type": "text", + "content": ". 
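For clarity, a sketch of the Root Translation Error used above is given below, under our reading of "Euclidean norm of the cumulative distance errors within 1 second" as the mean error of accumulated displacement over sliding one-second windows; the windowing details are an assumption.

```python
import numpy as np

def root_translation_error_cm(pred_root, gt_root, fps=60, window_s=1.0):
    """Mean Euclidean norm of the accumulated root-translation error over
    sliding 1 s windows. pred_root, gt_root: (T, 3) root positions in
    meters; requires T > fps * window_s frames."""
    w = int(fps * window_s)
    pred_disp = pred_root[w:] - pred_root[:-w]  # displacement per window
    gt_disp = gt_root[w:] - gt_root[:-w]
    return 100.0 * np.linalg.norm(pred_disp - gt_disp, axis=-1).mean()
```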
This can be attributed to the legs capturing most of the locomotion data during translation, resulting in marginal gains from sensors on the upper-body. Figure 8 (B) shows the the cumulative distance error over time." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 426, + 560, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 426, + 560, + 525 + ], + "spans": [ + { + "bbox": [ + 313, + 426, + 560, + 525 + ], + "type": "text", + "content": "5.3.3 Ablation Study. We perform ablation studies to understand the impact of key components in our system and their effects on performance. At the core of our system lies a subtle yet powerful concept: higher-order digitization (e.g., body pose) improves lower-order digitizations (e.g., steps). To quantify this idea, we run an ablation study of our translation estimation technique using both IMU data and the corresponding full-body pose inferred from it versus using only IMU data. Figure 9 summarizes our results. Our IMU-only, direct regression has an error of " + }, + { + "bbox": [ + 313, + 426, + 560, + 525 + ], + "type": "inline_equation", + "content": "21.4\\mathrm{cm}" + }, + { + "bbox": [ + 313, + 426, + 560, + 525 + ], + "type": "text", + "content": " across both" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 339, + 540, + 536, + 674 + ], + "blocks": [ + { + "bbox": [ + 339, + 540, + 536, + 674 + ], + "lines": [ + { + "bbox": [ + 339, + 540, + 536, + 674 + ], + "spans": [ + { + "bbox": [ + 339, + 540, + 536, + 674 + ], + "type": "image", + "image_path": "7b97abca80b72e72c22ffcf61d1df2388fd2a3e720e8c40f16839719378dbc45.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 314, + 684, + 559, + 707 + ], + "lines": [ + { + "bbox": [ + 314, + 684, + 559, + 707 + ], + "spans": [ + { + "bbox": [ + 314, + 684, + 559, + 707 + ], + "type": "text", + "content": "Figure 9: Benefits of using high-order digitization (i.e., IMU inferred poses) for estimating global translation." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 200, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 200, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 200, + 68 + ], + "type": "text", + "content": "UIST '24, October 13-16, 2024, Pittsburgh, PA, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 531, + 60, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 531, + 60, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 531, + 60, + 558, + 68 + ], + "type": "text", + "content": "Xu, et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 83, + 173, + 258 + ], + "blocks": [ + { + "bbox": [ + 52, + 83, + 173, + 258 + ], + "lines": [ + { + "bbox": [ + 52, + 83, + 173, + 258 + ], + "spans": [ + { + "bbox": [ + 52, + 83, + 173, + 258 + ], + "type": "image", + "image_path": "9a40e3573eaab617538725d13e6c909e5c242d39f4c53f843dea1f2131d3bbad.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 269, + 295, + 291 + ], + "lines": [ + { + "bbox": [ + 50, + 269, + 295, + 291 + ], + "spans": [ + { + "bbox": [ + 50, + 269, + 295, + 291 + ], + "type": "text", + "content": "Figure 10: Example indoor navigation application where MobilePoser digitizes multiple users within an office space." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 173, + 83, + 294, + 258 + ], + "blocks": [ + { + "bbox": [ + 173, + 83, + 294, + 258 + ], + "lines": [ + { + "bbox": [ + 173, + 83, + 294, + 258 + ], + "spans": [ + { + "bbox": [ + 173, + 83, + 294, + 258 + ], + "type": "image", + "image_path": "468b8d082bb0ee2c8c8e2bf1858a5b7eddc0b0a61d02d612a1e9d13acd5a000c.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 312, + 294, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 312, + 294, + 334 + ], + "spans": [ + { + "bbox": [ + 50, + 312, + 294, + 334 + ], + "type": "text", + "content": "datasets, while our integrated " + }, + { + "bbox": [ + 50, + 312, + 294, + 334 + ], + "type": "inline_equation", + "content": "(\\mathrm{IMU} + \\mathrm{IMU}" + }, + { + "bbox": [ + 50, + 312, + 294, + 334 + ], + "type": "text", + "content": " inferred pose) approach decreases error by " + }, + { + "bbox": [ + 50, + 312, + 294, + 334 + ], + "type": "inline_equation", + "content": "29.4\\%" + }, + { + "bbox": [ + 50, + 312, + 294, + 334 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 50, + 312, + 294, + 334 + ], + "type": "inline_equation", + "content": "15.1~\\mathrm{cm}" + }, + { + "bbox": [ + 50, + 312, + 294, + 334 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 335, + 295, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 335, + 295, + 466 + ], + "spans": [ + { + "bbox": [ + 50, + 335, + 295, + 466 + ], + "type": "text", + "content": "Building on the multi-stage architecture, we further evaluate the impact of two additional components: jerk loss and physics refinement. These elements were designed to enhance motion smoothness and physical plausibility. For the IMUPoser dataset, the jerk loss reduces jitter by " + }, + { + "bbox": [ + 50, + 335, + 295, + 466 + ], + "type": "inline_equation", + "content": "23.9\\%" + }, + { + "bbox": [ + 50, + 335, + 295, + 466 + ], + "type": "text", + "content": " and translation error by " + }, + { + "bbox": [ + 50, + 335, + 295, + 466 + ], + "type": "inline_equation", + "content": "3.33\\%" + }, + { + "bbox": [ + 50, + 335, + 295, + 466 + ], + "type": "text", + "content": ", but increases mean pose error by " + }, + { + "bbox": [ + 50, + 335, + 295, + 466 + ], + "type": "inline_equation", + "content": "0.05\\%" + }, + { + "bbox": [ + 50, + 335, + 295, + 466 + ], + "type": "text", + "content": ". 
Further, the physics-aware refinement reduces jitter by " + }, + { + "bbox": [ + 50, + 335, + 295, + 466 + ], + "type": "inline_equation", + "content": "29.7\\%" + }, + { + "bbox": [ + 50, + 335, + 295, + 466 + ], + "type": "text", + "content": " and translation error by " + }, + { + "bbox": [ + 50, + 335, + 295, + 466 + ], + "type": "inline_equation", + "content": "0.4\\%" + }, + { + "bbox": [ + 50, + 335, + 295, + 466 + ], + "type": "text", + "content": ", but increases the mean pose error by " + }, + { + "bbox": [ + 50, + 335, + 295, + 466 + ], + "type": "inline_equation", + "content": "0.7\\%" + }, + { + "bbox": [ + 50, + 335, + 295, + 466 + ], + "type": "text", + "content": ". The negligible increase in mean pose error is expected, as it may occasionally over-smooth the motion. This phenomenon is also seen in the PIP [48]. We believe that significant improvements in jitter and translation far outweigh the minimal increase in pose error, resulting in a more realistic motion." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 472, + 295, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 472, + 295, + 582 + ], + "spans": [ + { + "bbox": [ + 50, + 472, + 295, + 582 + ], + "type": "text", + "content": "5.3.4 Comparison with prior work. To the best of our knowledge, no other works have explored both full-body pose and translation from such a sparse set of commodity IMUs. IMUPoser [28], which also targets consumer devices, does not estimate global translation. On the TotalCapture dataset, TransPose (6 IMUs) has a translation error of " + }, + { + "bbox": [ + 50, + 472, + 295, + 582 + ], + "type": "inline_equation", + "content": "12.8\\mathrm{cm}" + }, + { + "bbox": [ + 50, + 472, + 295, + 582 + ], + "type": "text", + "content": " while that of MobilePoser is " + }, + { + "bbox": [ + 50, + 472, + 295, + 582 + ], + "type": "inline_equation", + "content": "19.9\\mathrm{cm}" + }, + { + "bbox": [ + 50, + 472, + 295, + 582 + ], + "type": "text", + "content": " when a single IMU device is placed in the pocket. Unsurprisingly, a commercial grade, 6 IMU-based system has higher accuracy due to their waist and knee mounted sensors, which capture larger ranges of locomotion compared to devices carried in the pocket." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 593, + 151, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 593, + 151, + 604 + ], + "spans": [ + { + "bbox": [ + 51, + 593, + 151, + 604 + ], + "type": "text", + "content": "6 EXAMPLE USES" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 607, + 295, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 607, + 295, + 662 + ], + "spans": [ + { + "bbox": [ + 50, + 607, + 295, + 662 + ], + "type": "text", + "content": "MobilePoser enables full-body pose estimation with global motion tracking using devices that users already own, opening up a wide range of novel applications. This section showcases three proof-of-concept applications in indoor navigation, gaming, and healthcare to illustrate MobilePoser's unique capabilities and potential impact." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 673, + 254, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 673, + 254, + 685 + ], + "spans": [ + { + "bbox": [ + 50, + 673, + 254, + 685 + ], + "type": "text", + "content": "6.1 Indoor Localization and Navigation" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 687, + 294, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 687, + 294, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 687, + 294, + 710 + ], + "type": "text", + "content": "To demonstrate MobilePoser's potential in this domain, we scan an office space using the PolyCam [34] LiDAR scanner app with" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 317, + 83, + 446, + 202 + ], + "blocks": [ + { + "bbox": [ + 317, + 83, + 446, + 202 + ], + "lines": [ + { + "bbox": [ + 317, + 83, + 446, + 202 + ], + "spans": [ + { + "bbox": [ + 317, + 83, + 446, + 202 + ], + "type": "image", + "image_path": "8bf1542bdaa768fc5e2a80f6613166a3fb2954882e1c0b9e4d765ccec6f2611a.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 446, + 83, + 558, + 202 + ], + "blocks": [ + { + "bbox": [ + 446, + 83, + 558, + 202 + ], + "lines": [ + { + "bbox": [ + 446, + 83, + 558, + 202 + ], + "spans": [ + { + "bbox": [ + 446, + 83, + 558, + 202 + ], + "type": "image", + "image_path": "9ed4af4fa541a6111eb81d829e2a5dee85f3efe408f46523f63cfff3248702bf.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 317, + 203, + 446, + 323 + ], + "blocks": [ + { + "bbox": [ + 317, + 203, + 446, + 323 + ], + "lines": [ + { + "bbox": [ + 317, + 203, + 446, + 323 + ], + "spans": [ + { + "bbox": [ + 317, + 203, + 446, + 323 + ], + "type": "image", + "image_path": "2af18b4f56ea893a7d4db8d091c0a22c68ebfee9c664430ea05299bdf5efa3c4.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 314, + 334, + 559, + 366 + ], + "lines": [ + { + "bbox": [ + 314, + 334, + 559, + 366 + ], + "spans": [ + { + "bbox": [ + 314, + 334, + 559, + 366 + ], + "type": "text", + "content": "Figure 11: In this table tennis game users can move around the table freely and use their wrist-instrumented hand to control their racket." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 446, + 203, + 558, + 323 + ], + "blocks": [ + { + "bbox": [ + 446, + 203, + 558, + 323 + ], + "lines": [ + { + "bbox": [ + 446, + 203, + 558, + 323 + ], + "spans": [ + { + "bbox": [ + 446, + 203, + 558, + 323 + ], + "type": "image", + "image_path": "b463e6ba1e085e714896d2c0eb817f52b2064dd728a766208e5c2a436692c40d.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 386, + 559, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 386, + 559, + 496 + ], + "spans": [ + { + "bbox": [ + 313, + 386, + 559, + 496 + ], + "type": "text", + "content": "an Apple iPhone 15 Pro. As shown in Figure 10, multiple users walk through the virtual office space, with their interactions and movements seamlessly digitized and represented in real-time. Here, one user has a phone in their pocket and a watch on their wrist, while the other two only have a phone in their pocket. 
By leveraging the IMUs in these consumer devices, MobilePoser enables accurate indoor navigation and localization without the need for additional infrastructure or specialized hardware. This opens up exciting possibilities for applications such as indoor way finding, context-aware virtual assistants, and immersive virtual tours." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 314, + 506, + 481, + 519 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 506, + 481, + 519 + ], + "spans": [ + { + "bbox": [ + 314, + 506, + 481, + 519 + ], + "type": "text", + "content": "6.2 Mobile Gaming Experiences" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 521, + 559, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 521, + 559, + 630 + ], + "spans": [ + { + "bbox": [ + 313, + 521, + 559, + 630 + ], + "type": "text", + "content": "To showcase this potential, we developed a virtual table tennis game (Figure 11) that allows users to play remotely with others, similar to how Nintendo games are played in front of a TV. Each player has a phone in their pocket and a watch on the dominant (left) hand, which is controlling the racket. Players can freely move within their local space to control their avatars, adding a new level of physical interaction to the gaming experience. MobilePoser's ability to track full-body movements using everyday devices eliminates the need for specialized controllers, making immersive gaming experiences more accessible to a wider audience." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 314, + 640, + 446, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 640, + 446, + 651 + ], + "spans": [ + { + "bbox": [ + 314, + 640, + 446, + 651 + ], + "type": "text", + "content": "6.3 Fitness and Wellness" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 654, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 654, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 654, + 559, + 710 + ], + "type": "text", + "content": "MobilePoser has the potential to revolutionize fitness tracking and rehabilitation by providing accurate, real-time feedback on a user's movements and poses without the need for external sensors or camera setups. 
This enables users to monitor their exercise form, track progress, and receive personalized guidance using the devices" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 397, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 397, + 68 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 397, + 68 + ], + "type": "text", + "content": "MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 410, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 410, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 410, + 60, + 559, + 69 + ], + "type": "text", + "content": "UIST'24,October 13-16,2024,Pittsburgh,PA,USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 82, + 293, + 258 + ], + "blocks": [ + { + "bbox": [ + 52, + 82, + 293, + 258 + ], + "lines": [ + { + "bbox": [ + 52, + 82, + 293, + 258 + ], + "spans": [ + { + "bbox": [ + 52, + 82, + 293, + 258 + ], + "type": "image", + "image_path": "d8de858657724951663c9cad16d40d80050174bb0830339947de87e3c6a36abc.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 269, + 295, + 301 + ], + "lines": [ + { + "bbox": [ + 50, + 269, + 295, + 301 + ], + "spans": [ + { + "bbox": [ + 50, + 269, + 295, + 301 + ], + "type": "text", + "content": "Figure 12: MobilePoser's full-body pose and locomotion can be used to automatically detect and count exercise repetitions, better estimate calories and monitor form." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 320, + 295, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 320, + 295, + 420 + ], + "spans": [ + { + "bbox": [ + 50, + 320, + 295, + 420 + ], + "type": "text", + "content": "they already own. In this example (Figure 12), a user performs a workout routine while MobilePoser captures the session using the IMU data from the smartphone in the user's pocket. This not only allows the user to review their performance and track progress over time but also enables remote monitoring by fitness instructors or physical therapists. Moreover, MobilePoser's ability to track full-body movements facilitates interactive rehabilitation regimens [4] and other passive health sensing applications such as gait analysis [30] or hyperactivity detection [6], among others." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 430, + 146, + 440 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 430, + 146, + 440 + ], + "spans": [ + { + "bbox": [ + 51, + 430, + 146, + 440 + ], + "type": "text", + "content": "7 OPEN SOURCE" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 444, + 295, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 444, + 295, + 510 + ], + "spans": [ + { + "bbox": [ + 50, + 444, + 295, + 510 + ], + "type": "text", + "content": "To enable other researchers and practitioners to build upon our work, we release our pre-trained models, data pre-processing scripts, and model training code as open-source software at: https://github. com/SPICExLAB/MobilePoser. 
By making our work fully reproducible and extensible, we hope to accelerate research and development in the field of mobile motion capture using everyday devices." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 519, + 253, + 530 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 519, + 253, + 530 + ], + "spans": [ + { + "bbox": [ + 50, + 519, + 253, + 530 + ], + "type": "text", + "content": "8 LIMITATIONS AND FUTURE WORK" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 534, + 295, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 534, + 295, + 654 + ], + "spans": [ + { + "bbox": [ + 50, + 534, + 295, + 654 + ], + "type": "text", + "content": "While MobilePoser demonstrates promising results in estimating full-body pose and translation using minimal instrumentation, there are several limitations and opportunities for future work. First, as a purely inertial-based technique, MobilePoser's translation estimation is still susceptible to drift, particularly when devices deviate from their calibrated positions. This can occur when users wear loose clothing, causing the phone in the pocket to move around and resulting in orientation changes. To address this issue, future work could explore re-calibration techniques based on stationary poses or leverage additional sensory information, such as GPS, UWB or visual odometry, to correct for drift." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 654, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 654, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 654, + 295, + 710 + ], + "type": "text", + "content": "Second, akin to prior wor, our evaluation has limitations of being tested on lab collected datasets. All the test datasets (DIP, TotalCapture, IMUPoser) were collected in lab settings due to the need for an accurate external ground truth motion capture system. Although we empirically demonstrate that MobilePoser works in real-world" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 85, + 558, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 85, + 558, + 106 + ], + "spans": [ + { + "bbox": [ + 314, + 85, + 558, + 106 + ], + "type": "text", + "content": "settings (as seen in the accompanying video), we acknowledge the need for future datasets captured in-the-wild." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 106, + 559, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 106, + 559, + 216 + ], + "spans": [ + { + "bbox": [ + 313, + 106, + 559, + 216 + ], + "type": "text", + "content": "Another limitation of MobilePoser, much like other prior works [14, 28, 48, 49], is the need for a calibration step. Currently, users first stand in a T-pose, which aligns the IMU data with the training data based on the SMPL kinematic model. While this calibration process is acceptable for some use cases, such as gaming, it may be less desirable for applications that demand seamless interactions, like indoor navigation. Future work could investigate more natural and unobtrusive calibration procedures, such as detecting common poses like standing with arms by the side using UWB, similar to SmartPoser [11]." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 314, + 217, + 558, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 217, + 558, + 261 + ], + "spans": [ + { + "bbox": [ + 314, + 217, + 558, + 261 + ], + "type": "text", + "content": "In conclusion, while MobilePoser presents a significant step forward in enabling full-body pose and translation estimation using everyday devices, there remain several avenues for future research to extend the capabilities of this approach." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 315, + 271, + 406, + 281 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 271, + 406, + 281 + ], + "spans": [ + { + "bbox": [ + 315, + 271, + 406, + 281 + ], + "type": "text", + "content": "9 CONCLUSION" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 285, + 559, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 285, + 559, + 429 + ], + "spans": [ + { + "bbox": [ + 313, + 285, + 559, + 429 + ], + "type": "text", + "content": "In this paper, we present MobilePoser, a real-time, on-device system for estimating full-body pose and translation using IMUs in consumer mobile devices (phones, watches, earbuds). By leveraging a multi-stage approach that combines data-driven learning and physics-based optimization, MobilePoser achieves state-of-the-art accuracy while remaining lightweight and efficient. Our extensive evaluation on public datasets demonstrates clear improvements over prior work, both in terms of full-body pose estimation accuracy and enabling novel global translation estimation. Furthermore, we showcase the potential of MobilePoser through a series of proof-of-concept applications in gaming, fitness, and indoor navigation, highlighting its ability to enable new and immersive experiences using the devices people already own." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 439, + 436, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 439, + 436, + 449 + ], + "spans": [ + { + "bbox": [ + 315, + 439, + 436, + 449 + ], + "type": "text", + "content": "ACKNOWLEDGMENTS" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 453, + 559, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 453, + 559, + 509 + ], + "spans": [ + { + "bbox": [ + 313, + 453, + 559, + 509 + ], + "type": "text", + "content": "We thank Jianru Ding from the University of Chicago and Zeya Chen from the Institute of Design, Illinois Institute of Technology for helping film the video. Vasco Xu's and Henry Hoffmann's work on this project is supported by NSF (CCF-1823032 and CNS-1956180)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 519, + 388, + 530 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 519, + 388, + 530 + ], + "spans": [ + { + "bbox": [ + 316, + 519, + 388, + 530 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 319, + 533, + 559, + 708 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 319, + 533, + 559, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 533, + 559, + 548 + ], + "spans": [ + { + "bbox": [ + 319, + 533, + 559, + 548 + ], + "type": "text", + "content": "[1] [n. d]. PlayStation VR. https://www.playstation.com/en-us/explore/playstationvr/." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 319, + 549, + 447, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 549, + 447, + 557 + ], + "spans": [ + { + "bbox": [ + 319, + 549, + 447, + 557 + ], + "type": "text", + "content": "[2] 2023. HTC Vive. https://www.vive.com." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 319, + 558, + 558, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 558, + 558, + 572 + ], + "spans": [ + { + "bbox": [ + 319, + 558, + 558, + 572 + ], + "type": "text", + "content": "[3] Karan Ahuja. 2024. Practical and Rich User Digitization. arXiv:2403.00153 [cs.HC] https://arxiv.org/abs/2403.00153" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 319, + 573, + 559, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 573, + 559, + 604 + ], + "spans": [ + { + "bbox": [ + 319, + 573, + 559, + 604 + ], + "type": "text", + "content": "[4] Karan Ahuja, Sven Mayer, Mayank Goel, and Chris Harrison. 2021. Pose-on-the-go: Approximating user pose with smartphone sensor fusion and inverse kinematics. In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems. 1-12." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 319, + 605, + 559, + 637 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 605, + 559, + 637 + ], + "spans": [ + { + "bbox": [ + 319, + 605, + 559, + 637 + ], + "type": "text", + "content": "[5] Karan Ahuja, Vivian Shen, Cathy Mengying Fang, Nathan Riopelle, Andy Kong, and Chris Harrison. 2022. Controllerpose: inside-out body capture with VR controller cameras. In Proceedings of the 2022 CHI Conference on Human Factors in Computing Systems. 1-13." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 319, + 638, + 559, + 677 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 638, + 559, + 677 + ], + "spans": [ + { + "bbox": [ + 319, + 638, + 559, + 677 + ], + "type": "text", + "content": "[6] Riku Arakawa, Karan Ahuja, Kristie Mak, Gwendolyn Thompson, Sam Shaaban, Oliver Lindhiem, and Mayank Goel. 2023. LemurDx: Using Unconstrained Passive Sensing for an Objective Measurement of Hyperactivity in Children with no Parent Input. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 7, 2 (2023), 1-23." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 319, + 677, + 559, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 677, + 559, + 708 + ], + "spans": [ + { + "bbox": [ + 319, + 677, + 559, + 708 + ], + "type": "text", + "content": "[7] Riku Arakawa, Bing Zhou, Gurunandan Krishnan, Mayank Goel, and Shree K Nayar. 2023. MI-Poser: Human Body Pose Tracking Using Magnetic and Inertial Sensor Fusion with Metal Interference Mitigation. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 7, 3 (2023), 1-24." 
+ } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 200, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 200, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 200, + 68 + ], + "type": "text", + "content": "UIST '24, October 13-16, 2024, Pittsburgh, PA, USA" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 531, + 60, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 531, + 60, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 531, + 60, + 558, + 68 + ], + "type": "text", + "content": "Xu, et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 86, + 295, + 700 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 55, + 86, + 294, + 110 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 86, + 294, + 110 + ], + "spans": [ + { + "bbox": [ + 55, + 86, + 294, + 110 + ], + "type": "text", + "content": "[8] Rayan Armani, Changlin Qian, Jiaxi Jiang, and Christian Holz. 2024. Ultra Inertial Poser: Scalable Motion Capture and Tracking from Sparse Inertial Sensors and Ultra-Wideband Ranging. In ACM SIGGRAPH 2024 Conference Papers. 1-11." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 110, + 295, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 110, + 295, + 150 + ], + "spans": [ + { + "bbox": [ + 55, + 110, + 295, + 150 + ], + "type": "text", + "content": "[9] Federica Bogo, Angjoo Kanazawa, Christoph Lassner, Peter Gehler, Javier Romero, and Michael J Black. 2016. Keep it SMPL: Automatic estimation of 3D human pose and shape from a single image. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part V 14. Springer, 561-578." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 150, + 295, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 150, + 295, + 182 + ], + "spans": [ + { + "bbox": [ + 53, + 150, + 295, + 182 + ], + "type": "text", + "content": "[10] Nathan Devrio and Chris Harrison. 2022. discoBand: Multiview Depth-Sensing Smartwatch Strap for Hand, Body and Environment Tracking. In Proceedings of the 35th Annual ACM Symposium on User Interface Software and Technology. 1-13." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 182, + 294, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 182, + 294, + 214 + ], + "spans": [ + { + "bbox": [ + 53, + 182, + 294, + 214 + ], + "type": "text", + "content": "[11] Nathan DeVrio, Vimal Mollyn, and Chris Harrison. 2023. SmartPoser: Arm Pose Estimation with a Smartphone and Smartwatch Using UWB and IMU Data. In Proceedings of the 36th Annual ACM Symposium on User Interface Software and Technology. 1-11." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 214, + 258, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 214, + 258, + 223 + ], + "spans": [ + { + "bbox": [ + 53, + 214, + 258, + 223 + ], + "type": "text", + "content": "[12] Roy Featherstone. 2014. Rigid body dynamics algorithms. Springer." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 223, + 294, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 223, + 294, + 255 + ], + "spans": [ + { + "bbox": [ + 53, + 223, + 294, + 255 + ], + "type": "text", + "content": "[13] Shubham Goel, Georgios Pavlakos, Jathushan Rajasegaran, Angjoo Kanazawa, and Jitendra Malik. 2023. Humans in 4d: Reconstructing and tracking humans with transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision. 14783-14794." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 255, + 294, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 255, + 294, + 286 + ], + "spans": [ + { + "bbox": [ + 53, + 255, + 294, + 286 + ], + "type": "text", + "content": "[14] Yinghao Huang, Manuel Kaufmann, Emre Aksan, Michael J Black, Otmar Hilliges, and Gerard Pons-Moll. 2018. Deep inertial pose: Learning to reconstruct human pose from sparse inertial measurements in real time. ACM Transactions on Graphics (TOG) 37, 6 (2018), 1-15." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 286, + 294, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 286, + 294, + 318 + ], + "spans": [ + { + "bbox": [ + 53, + 286, + 294, + 318 + ], + "type": "text", + "content": "[15] Fan Jiang, Xubo Yang, and Lele Feng. 2016. Real-time full-body motion reconstruction and recognition for off-the-shelf VR devices. In Proceedings of the 15th ACM SIGGRAPH Conference on Virtual-Reality Continuum and Its Applications in Industry-Volume 1, 309–318." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 318, + 294, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 318, + 294, + 350 + ], + "spans": [ + { + "bbox": [ + 53, + 318, + 294, + 350 + ], + "type": "text", + "content": "[16] Jiaxi Jiang, Paul Streli, Huajian Qiu, Andreas Fender, Larissa Laich, Patrick Snape, and Christian Holz. 2022. Avatarposer: Articulated full-body pose tracking from sparse motion sensing. In European Conference on Computer Vision. Springer, 443-460." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 350, + 294, + 382 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 350, + 294, + 382 + ], + "spans": [ + { + "bbox": [ + 53, + 350, + 294, + 382 + ], + "type": "text", + "content": "[17] Yifeng Jiang, Yuting Ye, Deepak Gopinath, Jungdam Won, Alexander W Winkler, and C Karen Liu. 2022. Transformer Inertial Poser: Real-time human motion reconstruction from sparse IMUs with simultaneous terrain generation. In SIGGRAPH Asia 2022 Conference Papers. 1-9." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 382, + 294, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 382, + 294, + 414 + ], + "spans": [ + { + "bbox": [ + 53, + 382, + 294, + 414 + ], + "type": "text", + "content": "[18] Haojian Jin, Zhijian Yang, Swarun Kumar, and Jason I Hong. 2018. Towards wearable everyday body-frame tracking using passive RFIDs. Proceedings of the ACM on Interactive, Mobile, Wearable and Ubiquitous Technologies 1, 4 (2018), 1-23." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 414, + 294, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 414, + 294, + 437 + ], + "spans": [ + { + "bbox": [ + 53, + 414, + 294, + 437 + ], + "type": "text", + "content": "[19] Daehwa Kim and Chris Harrison. 2022. 
Etherpose: Continuous hand pose tracking with wrist-worn antenna impedance characteristic sensing. In Proceedings of the 35th Annual ACM Symposium on User Interface Software and Technology. 1-12." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 53, + 437, + 294, + 469 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 437, + 294, + 469 + ], + "spans": [ + { + "bbox": [ + 53, + 437, + 294, + 469 + ], + "type": "text", + "content": "[20] David Kim, Otmar Hilliges, Shahram Izadi, Alex D Butler, Jiawen Chen, Jason Oikonomidis, and Patrick Olivier. 2012. Digits: freehand 3D interactions anywhere using a wrist-worn gloveless sensor. In Proceedings of the 25th annual ACM symposium on User interface software and technology. 167-176." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 53, + 469, + 294, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 469, + 294, + 485 + ], + "spans": [ + { + "bbox": [ + 53, + 469, + 294, + 485 + ], + "type": "text", + "content": "[21] Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 53, + 485, + 294, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 485, + 294, + 517 + ], + "spans": [ + { + "bbox": [ + 53, + 485, + 294, + 517 + ], + "type": "text", + "content": "[22] Alexander Kyu, Hongyu Mao, Junyi Zhu, Mayank Goel, and Karan Ahuja. 2024. EITPose: Wearable and Practical Electrical Impedance Tomography for Continuous Hand Pose Estimation. In Proceedings of the CHI Conference on Human Factors in Computing Systems. 1-10." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 53, + 517, + 294, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 517, + 294, + 541 + ], + "spans": [ + { + "bbox": [ + 53, + 517, + 294, + 541 + ], + "type": "text", + "content": "[23] Jiye Lee and Hanbyul Joo. 2024. Mocap Everyone Everywhere: Lightweight Motion Capture With Smartwatches and a Head-Mounted Camera. arXiv preprint arXiv:2401.00847 (2024)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 53, + 541, + 294, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 541, + 294, + 564 + ], + "spans": [ + { + "bbox": [ + 53, + 541, + 294, + 564 + ], + "type": "text", + "content": "[24] Yilin Liu, Shijia Zhang, and Mahanth Gowda. 2021. NeuroPose: 3D hand pose tracking using EMG wearables. In Proceedings of the Web Conference 2021. 1471-1482." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 53, + 564, + 294, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 564, + 294, + 589 + ], + "spans": [ + { + "bbox": [ + 53, + 564, + 294, + 589 + ], + "type": "text", + "content": "[25] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J. Black. 2015. SMPL: A Skinned Multi-Person Linear Model. ACM Trans. Graphics (Proc. SIGGRAPH Asia) 34, 6 (Oct. 2015), 248:1-248:16." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 53, + 589, + 294, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 589, + 294, + 620 + ], + "spans": [ + { + "bbox": [ + 53, + 589, + 294, + 620 + ], + "type": "text", + "content": "[26] Naureen Mahmood, Nima Ghorbani, Nikolaus F Troje, Gerard Pons-Moll, and Michael J Black. 2019. AMASS: Archive of motion capture as surface shapes. 
In Proceedings of the IEEE/CVF international conference on computer vision. 5442-5451." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 53, + 620, + 206, + 629 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 620, + 206, + 629 + ], + "spans": [ + { + "bbox": [ + 53, + 620, + 206, + 629 + ], + "type": "text", + "content": "[27] Microsoft Corporation. [n.d.]. Microsoft Kinect." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 53, + 629, + 294, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 629, + 294, + 661 + ], + "spans": [ + { + "bbox": [ + 53, + 629, + 294, + 661 + ], + "type": "text", + "content": "[28] Vimal Mollyn, Riku Arakawa, Mayank Goel, Chris Harrison, and Karan Ahuja. 2023. IMUPoser: Full-Body Pose Estimation using IMUs in Phones, Watches, and Earbuds. In Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems. 1-12." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 53, + 661, + 254, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 661, + 254, + 669 + ], + "spans": [ + { + "bbox": [ + 53, + 661, + 254, + 669 + ], + "type": "text", + "content": "[29] NaturalPoint, Inc. [n.d.]. OptiTrack. https://www.optitrack.com." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 53, + 669, + 294, + 700 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 669, + 294, + 700 + ], + "spans": [ + { + "bbox": [ + 53, + 669, + 294, + 700 + ], + "type": "text", + "content": "[30] Shu Nishiguchi, Minoru Yamada, Koutatsu Nagai, Shuhei Mori, Yuu Kajiwara, Takuya Sonoda, Kazuya Yoshimura, Hiroyuki Yoshitomi, Hiromu Ito, Kazuya Okamoto, et al. 2012. Reliability and validity of gait analysis by android-based smartphone. Telemedicine and e-Health 18, 4 (2012), 292–296." + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 86, + 559, + 605 + ], + "type": "list", + "angle": 0, + "index": 48, + "blocks": [ + { + "bbox": [ + 316, + 86, + 559, + 102 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 86, + 559, + 102 + ], + "spans": [ + { + "bbox": [ + 316, + 86, + 559, + 102 + ], + "type": "text", + "content": "[31] Northern Digital Inc. 2020. travSTAR. https://www.ndigital.com/msci/products/drivebay-trakstar." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 102, + 559, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 102, + 559, + 135 + ], + "spans": [ + { + "bbox": [ + 316, + 102, + 559, + 135 + ], + "type": "text", + "content": "[32] Mathias Parger, Joerg H Mueller, Dieter Schmalstieg, and Markus Steinberger. 2018. Human upper-body inverse kinematics for increased embodiment in consumer-grade virtual reality. In Proceedings of the 24th ACM symposium on virtual reality software and technology. 1-10." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 135, + 548, + 143 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 135, + 548, + 143 + ], + "spans": [ + { + "bbox": [ + 316, + 135, + 548, + 143 + ], + "type": "text", + "content": "[33] Polhemus. 2020. Polhemus Motion Capture System. https://polhemus.com/." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 143, + 463, + 151 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 143, + 463, + 151 + ], + "spans": [ + { + "bbox": [ + 316, + 143, + 463, + 151 + ], + "type": "text", + "content": "[34] PolyCam. 
[n.d.]. PolyCam. https://poly.cam/." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 316, + 151, + 558, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 151, + 558, + 175 + ], + "spans": [ + { + "bbox": [ + 316, + 151, + 558, + 175 + ], + "type": "text", + "content": "[35] Jose Luis Ponton, Haoran Yun, Andreas Aristidou, Carlos Andujar, and Nuria Pelechano. 2023. SparsePoser: Real-time Full-body Motion Reconstruction from Sparse Data. ACM Transactions on Graphics 43, 1 (2023), 1-14." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 316, + 175, + 558, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 175, + 558, + 198 + ], + "spans": [ + { + "bbox": [ + 316, + 175, + 558, + 198 + ], + "type": "text", + "content": "[36] Jathushan Rajasegaran, Georgios Pavlakos, Angjoo Kanazawa, and Jitendra Malik. 2021. Tracking people with 3D representations. arXiv preprint arXiv:2111.07868 (2021)." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 316, + 198, + 558, + 223 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 198, + 558, + 223 + ], + "spans": [ + { + "bbox": [ + 316, + 198, + 558, + 223 + ], + "type": "text", + "content": "[37] Nirupam Roy, He Wang, and Romit Roy Choudhury. 2014. I am a smartphone and i can tell my user's walking direction. In Proceedings of the 12th annual international conference on Mobile systems, applications, and services. 329-342." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 316, + 223, + 558, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 223, + 558, + 247 + ], + "spans": [ + { + "bbox": [ + 316, + 223, + 558, + 247 + ], + "type": "text", + "content": "[38] Takaki Shiratori, Hyun Soo Park, Leonid Sigal, Yaser Sheikh, and Jessica K Hodgins. 2011. Motion capture from body-mounted cameras. In ACM SIGGRAPH 2011 papers. 1-10." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 316, + 247, + 558, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 247, + 558, + 262 + ], + "spans": [ + { + "bbox": [ + 316, + 247, + 558, + 262 + ], + "type": "text", + "content": "[39] Ivan E Sutherland. 1968. A head-mounted three dimensional display. In Proceedings of the December 9-11, 1968, fall joint computer conference, part I. 757-764." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 316, + 262, + 558, + 286 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 262, + 558, + 286 + ], + "spans": [ + { + "bbox": [ + 316, + 262, + 558, + 286 + ], + "type": "text", + "content": "[40] Matthew Trumble, Andrew Gilbert, Charles Malleson, Adrian Hilton, and John Collomosse. 2017. Total capture: 3d human pose estimation fusing video and inertial sensors. In Proceedings of 28th British Machine Vision Conference. 1-13." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 316, + 286, + 522, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 286, + 522, + 294 + ], + "spans": [ + { + "bbox": [ + 316, + 286, + 522, + 294 + ], + "type": "text", + "content": "[41] Vicon Motion Systems Ltd. [n.d.]. Vicon. https://www.vicon.com." 
+ } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 316, + 294, + 558, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 294, + 558, + 318 + ], + "spans": [ + { + "bbox": [ + 316, + 294, + 558, + 318 + ], + "type": "text", + "content": "[42] Daniel Vlasic, Rolf Adelsberger, Giovanni Vannucci, John Barnwell, Markus Gross, Wojciech Matusik, and Jovan Popovic. 2007. Practical motion capture in everyday surroundings. ACM transactions on graphics (TOG) 26, 3 (2007), 35-es." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 316, + 318, + 558, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 318, + 558, + 342 + ], + "spans": [ + { + "bbox": [ + 316, + 318, + 558, + 342 + ], + "type": "text", + "content": "[43] Timo Von Marcard, Bodo Rosenhahn, Michael J Black, and Gerard Pons-Moll. 2017. Sparse inertial poser: Automatic 3d human pose estimation from sparse imus. In Computer graphics forum, Vol. 36. Wiley Online Library, 349-360." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 316, + 342, + 558, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 342, + 558, + 374 + ], + "spans": [ + { + "bbox": [ + 316, + 342, + 558, + 374 + ], + "type": "text", + "content": "[44] Erwin Wu, Ye Yuan, Hui-Shyong Yeo, Aaron Quigley, Hideki Koike, and Kris M Kitani. 2020. Back-hand-posed: 3d hand pose estimation for a wrist-worn camera via dorsum deformation network. In Proceedings of the 33rd Annual ACM Symposium on User Interface Software and Technology. 1147–1160." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 316, + 374, + 558, + 389 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 374, + 558, + 389 + ], + "spans": [ + { + "bbox": [ + 316, + 374, + 558, + 389 + ], + "type": "text", + "content": "[45] Xsens Technologies B.V. [n.d.]. Xsens IMU Systems. https://www.xsens.com. Accessed: 2024-03-07." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 316, + 389, + 558, + 413 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 389, + 558, + 413 + ], + "spans": [ + { + "bbox": [ + 316, + 389, + 558, + 413 + ], + "type": "text", + "content": "[46] Hang Yan, Qi Shan, and Yasutaka Furukawa. 2018. RIDI: Robust IMU double integration. In Proceedings of the European conference on computer vision (ECCV), 621-636." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 316, + 413, + 558, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 413, + 558, + 445 + ], + "spans": [ + { + "bbox": [ + 316, + 413, + 558, + 445 + ], + "type": "text", + "content": "[47] Xinyu Yi, Yuxiao Zhou, Marc Habermann, Vladislav Golyanik, Shaohua Pan, Christian Theobalt, and Feng Xu. 2023. EgoLocate: Real-time Motion Capture, Localization, and Mapping with Sparse Body-mounted Sensors. arXiv preprint arXiv:2305.01599 (2023)." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 316, + 445, + 558, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 445, + 558, + 485 + ], + "spans": [ + { + "bbox": [ + 316, + 445, + 558, + 485 + ], + "type": "text", + "content": "[48] Xinyu Yi, Yuxiao Zhou, Marc Habermann, Soshi Shimada, Vladislav Golyanik, Christian Theobalt, and Feng Xu. 2022. Physical inertial poser (pip): Physics-aware real-time human motion tracking from sparse inertial sensors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 13167-13178." 
+ } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 316, + 485, + 558, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 485, + 558, + 509 + ], + "spans": [ + { + "bbox": [ + 316, + 485, + 558, + 509 + ], + "type": "text", + "content": "[49] Xinyu Yi, Yuxiao Zhou, and Feng Xu. 2021. Transpose: Real-time 3d human translation and pose estimation with six inertial sensors. ACM Transactions on Graphics (TOG) 40, 4 (2021), 1-13." + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 316, + 509, + 558, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 509, + 558, + 540 + ], + "spans": [ + { + "bbox": [ + 316, + 509, + 558, + 540 + ], + "type": "text", + "content": "[50] Yang Zhang, Chouchang Yang, Scott E Hudson, Chris Harrison, and Alanson Sample. 2018. Wall++ room-scale interactive and context-aware sensing. In Proceedings of the 2018 chi conference on human factors in computing systems. 1-15." + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 316, + 540, + 558, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 540, + 558, + 572 + ], + "spans": [ + { + "bbox": [ + 316, + 540, + 558, + 572 + ], + "type": "text", + "content": "[51] Mingmin Zhao, Tianhong Li, Mohammad Abu Alsheikh, Yonglong Tian, Hang Zhao, Antonio Torralba, and Dina Katabi. 2018. Through-wall human pose estimation using radio signals. In Proceedings of the IEEE conference on computer vision and pattern recognition. 7356-7365." + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 316, + 572, + 558, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 572, + 558, + 605 + ], + "spans": [ + { + "bbox": [ + 316, + 572, + 558, + 605 + ], + "type": "text", + "content": "[52] Li'an Zhuo, Jian Cao, Qi Wang, Bang Zhang, and Liefeng Bo. 2023. Towards Stable Human Pose Estimation via Cross-View Fusion and Foot Stabilization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 650-659." 
+ } + ] + } + ], + "index": 47 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 397, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 397, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 397, + 68 + ], + "type": "text", + "content": "MobilePoser: Real-Time Full-Body Pose Estimation and 3D Human Translation from IMUs in Mobile Consumer Devices" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 411, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 411, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 411, + 60, + 559, + 69 + ], + "type": "text", + "content": "UIST '24, October 13-16, 2024, Pittsburgh, PA, USA" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12680/04e3b704-b5e8-4635-9499-209e4f014c85_content_list.json b/data/2025/2504_12xxx/2504.12680/04e3b704-b5e8-4635-9499-209e4f014c85_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..106b5c4e1dc748fe51e32fa6fab99c5316abbee6 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/04e3b704-b5e8-4635-9499-209e4f014c85_content_list.json @@ -0,0 +1,2464 @@ +[ + { + "type": "text", + "text": "Embodied-R: Collaborative Framework for Activating Embodied Spatial Reasoning in Foundation Models via Reinforcement Learning", + "text_level": 1, + "bbox": [ + 148, + 99, + 849, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Baining Zhao*, Ziyou Wang*, Jianjie Fang*, Chen Gao†, Fanghang Man, Jinqiang Cui, Xin Wang, Xinlei Chen†, Yong Li, Wenwu Zhu", + "bbox": [ + 187, + 188, + 812, + 224 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tsinghua University", + "bbox": [ + 416, + 226, + 581, + 243 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/260ca3540d2ad6cd03f0d4ebb90864c70e7bb2a3f3d5777757b66e28572d71f1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 334, + 243, + 354, + 258 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Project Page", + "bbox": [ + 359, + 244, + 454, + 260 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/7466a5b5cddbe1975919ac7f8fb16269bedbff7a95fb25f6839397ba74145802.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 599, + 242, + 620, + 257 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Code", + "bbox": [ + 622, + 244, + 663, + 258 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tasks", + "text_level": 1, + "bbox": [ + 274, + 282, + 310, + 294 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/b26be9858af5be7bd451a3520f190c77d197a842af79e537534c1d3cdadee387.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 109, + 300, + 300, + 378 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "① Landmark Position", + "bbox": [ + 303, + 301, + 385, + 310 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "what is your current position relative to [landmark] in", + "bbox": [ + 316, + 311, + 457, + 316 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "[navigation instruction]", + "bbox": [ + 316, + 316, + 380, + 321 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "② Counterfactual Reasoning", + "bbox": [ + 305, + 327, + 411, + 335 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Can you still 
reach destination if moving in another direction?", + "bbox": [ + 305, + 335, + 478, + 340 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3Progress Evaluation", + "bbox": [ + 305, + 345, + 390, + 353 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Which step the navigation is currently perform in", + "bbox": [ + 305, + 353, + 446, + 361 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "4 Action Generation", + "bbox": [ + 305, + 369, + 385, + 378 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "What is your next action given [navigation instruction]?", + "bbox": [ + 305, + 378, + 462, + 385 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$⑤$ Relative Distance", + "bbox": [ + 305, + 395, + 380, + 402 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Which object is the closest to [object A]?", + "bbox": [ + 316, + 402, + 424, + 410 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "6 Relative Direction", + "bbox": [ + 305, + 415, + 383, + 422 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "If you are standing by [object A] and facing [object B], is", + "bbox": [ + 316, + 422, + 462, + 429 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "the [object C] to your left, right, or back?", + "bbox": [ + 316, + 429, + 424, + 435 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "7 Route Planning", + "bbox": [ + 305, + 439, + 372, + 446 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "How to navigate to [object A]?", + "bbox": [ + 316, + 446, + 398, + 453 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Appearance Order", + "bbox": [ + 305, + 458, + 385, + 465 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "What will be the first-time appearance order of [object A,", + "bbox": [ + 305, + 465, + 467, + 472 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "object B, object C] in your memory?", + "bbox": [ + 316, + 473, + 411, + 479 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "··", + "bbox": [ + 316, + 484, + 331, + 489 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8066252818aa0f756bf50e09469d3ee84df1cccc7b9c939aa9775510b37a56f7.jpg", + "image_caption": [ + "Embodied-R" + ], + "image_footnote": [], + "bbox": [ + 477, + 340, + 568, + 425 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Reasoning", + "text_level": 1, + "bbox": [ + 697, + 282, + 767, + 296 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a7afab221f6d6c3288ef9526fa3ff072372cb905f27e3f3fa7b35a2294a9b74a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 581, + 297, + 885, + 320 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "[Example] Question: Navigation Instruction given at initial position: [Observe around, then fly towards the road, then turn left and land on the roof of the building on the left]. What are you doing now?", + "bbox": [ + 584, + 323, + 866, + 345 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Choose: A. I look around the square area. B. I fly towards the road.", + "bbox": [ + 584, + 347, + 839, + 354 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "C. I turn left and land on the roof of the building on the left.", + "bbox": [ + 617, + 356, + 834, + 363 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "D. I fly over the park. E. 
I land.", + "bbox": [ + 617, + 364, + 779, + 371 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "E.I land", + "bbox": [ + 589, + 375, + 620, + 388 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/8fe50a38bdcaff17c4598a1be54b92b276eda1fc2d6c11ae4b445b89417288e8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 629, + 375, + 648, + 391 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Fast-Thinking", + "bbox": [ + 663, + 375, + 728, + 382 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Slow-Thinking", + "bbox": [ + 800, + 390, + 870, + 398 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Think: Let's analyze the event sequence and the instruction step by step:", + "bbox": [ + 584, + 402, + 867, + 409 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Initial Position: I start by observing around the square area.", + "bbox": [ + 583, + 410, + 805, + 417 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2. Next Step: After observing, I fly towards the highway.", + "bbox": [ + 583, + 417, + 785, + 425 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3. Subsequent Step: Once at the highway, I should turn left.", + "bbox": [ + 583, + 426, + 797, + 433 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "4. Final Step: Finally, I should land on the roof of the building on the left", + "bbox": [ + 583, + 434, + 844, + 441 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "From the video frames:", + "bbox": [ + 583, + 441, + 666, + 450 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "- The initial frames show buildings and a square area, indicating the phase.", + "bbox": [ + 583, + 452, + 852, + 459 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "- The subsequent frames show a transition from the square area to a broader view.", + "bbox": [ + 583, + 460, + 883, + 467 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "- The later frames depict a clear view of the road and surrounding buildings.", + "bbox": [ + 583, + 469, + 861, + 474 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/6b2092ae3ac9d8a38a7043afa27952f1f670fafcd1820ad106db5ceb62650bc1.jpg", + "image_caption": [ + "Figure 1: Embodied spatial reasoning: tasks and thinking process. Challenging tasks from public embodied video datasets are identified, encompassing both indoor and outdoor scenarios. We introduce slow-thinking to improve reasoning performance." + ], + "image_footnote": [], + "bbox": [ + 584, + 478, + 601, + 493 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Answer: B. I fly towards the road.", + "bbox": [ + 602, + 484, + 723, + 491 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/0690f619416d8140759d8bfe85c98be41326383f16d97579a0edd83c69a8451f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 735, + 479, + 753, + 493 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 83, + 541, + 156, + 554 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Humans can perceive and reason about spatial relationships from sequential visual observations, such as egocentric video streams. However, how pretrained models acquire such abilities, especially high-level reasoning, remains unclear. This paper introduces Embodied-R, a collaborative framework combining large-scale Vision-Language Models (VLMs) for perception and small-scale Language Models (LMs) for reasoning. 
Using Reinforcement Learning (RL) with a novel reward system considering think-answer logical consistency, the model achieves slow-thinking capabilities with limited computational resources. After training on only 5k embodied video samples, Embodied-R with a 3B LM matches state-of-the-art multimodal reasoning models (OpenAI-o1, Gemini-2.5-pro) on both in-distribution and out-of-distribution embodied spatial reasoning tasks. Embodied-R also exhibits emergent thinking patterns such as systematic analysis and contextual integration. We further explore research questions including response length, training on VLM, strategies for reward design, and differences in model generalization after SFT (Supervised Fine-Tuning) and RL training.", + "bbox": [ + 81, + 558, + 485, + 809 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 83, + 835, + 218, + 849 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "On the path toward Artificial General Intelligence (AGI) [17], we hope that pre-trained foundation models can not only perform tasks such as dialogue and image understanding in the cyber world [2, 44]", + "bbox": [ + 81, + 853, + 480, + 896 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "but also develop human-like embodied spatial cognition in the three-dimensional physical world, enabling them to perceive, think, and move [4, 32]. The fundamental way humans achieve spatial cognition is through continuous, dynamic visual observations, akin to video streams [26, 30]. For example, by observing their surroundings, humans can infer their position relative to nearby objects. Similarly, based on historical visual observations, humans can determine the actions they should take to reach a target destination.", + "bbox": [ + 511, + 541, + 915, + 652 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Visual spatial cognition can be divided into two levels: perception and reasoning [51]. Perception refers to \"what is seen\", characterized by direct, low-level tasks such as object recognition, edge detection, or color differentiation [52]. Reasoning, on the other hand, involves \"what is understood\" and \"what actions to take\", which are indirect and higher-level tasks requiring logical inference and knowledge integration [62]. Examples of reasoning include \"Where did I come from?\" (e.g., recalling historical movement trajectories [36]), \"Where am I?\" (e.g., inferring the spatial relationships between nearby objects and distances [5]), and \"Where do I want to go?\" (e.g., planning actions and deciding movements to reach a destination [8]). While most existing research focuses on improving the perception capabilities of foundation models [6, 11], with notable progress, their spatial reasoning abilities remain limited [9, 58], and methods for enhancement are largely unexplored.", + "bbox": [ + 511, + 652, + 913, + 859 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Specifically, video-based spatial reasoning poses several challenges, as follows:", + "bbox": [ + 513, + 859, + 913, + 887 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.12680v1 [cs.AI] 17 Apr 2025", + "bbox": [ + 22, + 270, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Reasoning is always built upon perception [19, 32]. For the studied problem, continuous visual observations impose higher demands on perception. 
Reasoning cannot be well achieved with faulty perceptions or hallucinations [53]. It is challenging to reason when it is already hard to perceive from the videos.", + "- Video data naturally involves complex spatio-temporal relationships, requiring the discovery of object associations across frames and the extraction of semantics relevant to the reasoning task [16]. For instance, to navigate to a destination outside the current field of view, one must infer their location from historical visual observations, build a mental map of the environment, develop a high-level plan to determine the direction, and finally decide on specific actions to execute. Existing supervised fine-tuning (SFT) training methods lack supervision for the reasoning process, making it difficult to handle such reasoning tasks [62].", + "- Embodied visual observations have distinct characteristics. First, understanding disembodied videos, such as movies or TV shows, primarily emphasizes the content within the video, often from a broad and objective perspective [27]. In contrast, egocentric videos focus on understanding the relationship between the observer and the surrounding environment, often from a constrained first-person perspective [22]. Second, embodied continuous visual observations are generated over time, indicating that embodied perception should rely on sequential inputs rather than aggregating all visual observations for a single input after a prolonged period [31]. Finally, due to the continuity of motion in the physical world, egocentric visual observations also exhibit spatial continuity, meaning there is significant redundancy and repetition between frames. Consequently, directly applying existing multimodal large language models (MLLMs) to embodied videos leads to issues, including loss of generalization and input token limits caused by excessive redundant frames [1, 29]." + ], + "bbox": [ + 83, + 106, + 482, + 549 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recently, the impressive performance of OpenAI's o1/o3 [38] and DeepSeek-R1 [24] in solving complex reasoning problems(e.g., mathematics, coding, science, etc.) has drawn attention to reinforcement learning (RL) techniques. By incorporating the chain-of-thought (CoT) reasoning process into post-training, large language models (LLMs) demonstrate a \"slow-thinking\" mode, where they reason thoroughly before generating responses [45, 55]. Inspired by this, we attempt to introduce \"slow thinking\" into embodied video-based spatial reasoning tasks, as shown in Figure 1.", + "bbox": [ + 81, + 563, + 482, + 686 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This brings a new challenge: the trade-off between model size and computational cost. Existing studies suggest a strong correlation between multimodal understanding/perception capabilities and model size [7, 20, 56]. Since reasoning builds on perception, larger vision-language foundation models should be used as the starting point for training. However, increasing model size leads to often unacceptable computational costs. Additionally, video inputs map to long token sequences, further raising computational demands. 
Is there a way to leverage the perception capabilities of large-scale models while developing embodied reasoning abilities at a lower computational cost?", + "bbox": [ + 81, + 688, + 482, + 839 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Inspired by neuroscience [64], spatial perception and reasoning involve distinct brain regions: visual perception occurs in the visual areas of the occipital lobe [13], basic spatial understanding in the parietal lobe [18], and complex spatial reasoning in the prefrontal", + "bbox": [ + 81, + 840, + 482, + 896 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "cortex [14]. This inspired the design of a collaborative framework with two main components: a large-scale vision-language model (VLM) for perception and a small-scale language model (LM) for reasoning. Based on the continuity of observations, we first propose a key-frame extractor to retain critical information while reducing computational costs. Using a VLM, we sequentially extract semantic information from the frames, which simulates real-world online reasoning while effectively managing the input token length of VLMs for long video inputs. Finally, the semantic information and reasoning question are fed into the small-scale language model, which outputs the reasoning process and final answers. The small-scale language model is trained with RL, where the reward modeling not only incorporates rule-based rewards inspired by Deepseek-R1-Zero [24] but, more importantly, introduces a novel reward for the logical consistency of the reasoning process. In the experiments, we explore seven research questions, covering the framework's performance, RL's role in activating embodied spatial reasoning, and out-of-distribution generalization capabilities.", + "bbox": [ + 511, + 106, + 913, + 354 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In general, the main contributions of this paper are as follows:", + "bbox": [ + 529, + 356, + 908, + 369 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose a collaborative framework for large-scale and small-scale foundation models to address spatial reasoning in the video modality. By decoupling perception and reasoning, the framework leverages the perceptual strength of large-scale foundation models while efficiently enhancing the reasoning capabilities of smaller models in a computationally resource-friendly manner.", + "- This is the first work to employ reinforcement learning (RL) to enhance the embodied spatial reasoning abilities of foundation models. Specifically, we introduce a novel logical consistency reward, which improves the alignment between reasoning processes and generated answers.", + "- Our proposed Embodied-R achieves performance comparable to state-of-the-art multimodal large language models (e.g., OpenAI-o1/Gemini-2.5-Pro) on both in-distribution and out-of-distribution benchmarks. We further investigate research questions including the generalization comparison between models trained by SFT & RL, reward design strategies, etc." + ], + "bbox": [ + 514, + 383, + 913, + 618 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 514, + 642, + 658, + 656 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Large Language Model Reasoning. 
Recently, enhancing reasoning capabilities has become a key focus in large model technologies, demonstrating remarkable performance on tasks such as mathematical and logical problem-solving [25, 47, 57]. Following the release of OpenAI's o1 [38], numerous studies have proposed various technical approaches to achieve similar functionalities, including Chain-of-Thought (CoT) [54], Monte Carlo Tree Search (MCTS) [23, 60], distillation [35], rejection sampling combined with supervised fine-tuning (SFT) or Direct Preference Optimization (DPO) [40], among others. Furthermore, DeepSeek-R1 [24] introduced a method to foster the emergence of reasoning abilities in large language models (LLMs) through rule-based rewards combined with reinforcement learning. Similarly, Kimi k1.5 [45] proposed a comparable approach, presenting various training techniques, such as curriculum learning. This reinforcement learning paradigm has sparked significant interest, with subsequent works successfully reproducing related results [55, 59].", + "bbox": [ + 511, + 660, + 913, + 896 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Embodied Spatial Reasoning with VLMs. Inspired by the generality of foundation models across various domains [2, 3], embodied intelligence aims to develop agents that utilize large multimodal models as their "brains" to achieve perception, navigation, and manipulation in the 3D physical world [15, 41]. In terms of input, human visual-spatial perception is more akin to continuous RGB observations, similar to video streams [12, 42], rather than static images [48] or point clouds [52]. Several embodied video benchmarks [58] demonstrate that, while perception tasks are relatively well-addressed, spatial reasoning tasks—such as spatial relationship inference, navigation, and planning—remain highly challenging. However, existing research [16, 43] on video reasoning primarily focuses on disembodied content reasoning, with little emphasis on scenarios involving embodied continuous visual inputs.", + "bbox": [ + 81, + 106, + 480, + 299 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Collaboration between large and small models. Existing research primarily focuses on addressing the resource consumption and privacy risks associated with large models, as well as the efficiency and performance advantages of small models in specific scenarios [50]. Small models can assist large models in data selection, prompt optimization, and reasoning enhancement [28, 61]. The use of small models to detect hallucinations and privacy leakage is explored in [49, 63], improving overall system reliability. While our work shares the goal of reducing computational resource demands, it differs by emphasizing the complementary roles of large-scale VLMs in perception and small-scale LMs in enhancing embodied spatial reasoning.", + "bbox": [ + 81, + 300, + 480, + 467 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 The Embodied-R Method", + "text_level": 1, + "bbox": [ + 83, + 479, + 318, + 494 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We first define the problem of embodied spatial reasoning. Subsequently, we introduce the VLM-based perception module and the LM-based reasoning module.
The collaborative framework is shown in Figure 2.", + "bbox": [ + 81, + 498, + 482, + 555 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 Problem Formulation", + "text_level": 1, + "bbox": [ + 83, + 566, + 303, + 580 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In the physical world, an agent moves through space, generating a sequence of video frames (continuous visual observations) $\\mathbf{f} = [f_0, f_1, \\dots, f_T]$ . Suppose a spatial reasoning problem is denoted as $q$ . Our goal is to build a model that takes $q$ and $\\mathbf{f}$ as inputs and outputs an answer $a$ . The answer $a$ is considered correct if it is semantically consistent with the ground truth $g$ ; otherwise, it is deemed incorrect.", + "bbox": [ + 81, + 585, + 480, + 683 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2 Large-Scale VLM-based Perception", + "text_level": 1, + "bbox": [ + 83, + 696, + 406, + 713 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2.1 Key-Frame Extractor. As the agent moves continuously in space, high sampling frequencies result in significant overlap between consecutive frames. On one hand, the VLM relies on changes in the static objects within the environment across frames to infer the agent's pose variation. On the other hand, excessive overlap between frames leads to increased inference costs for both the VLM and LLM. To address this, we designed a key-frame extractor tailored to the characteristics of embodied videos, selecting key frames that retain overlap while ensuring sufficient information gain between them.", + "bbox": [ + 81, + 715, + 480, + 853 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The extraction of key-frames is based on the overlap of visual fields caused by motion continuity. When the agent moves forward, the visual content in the latter frame is expected to overlap with a", + "bbox": [ + 81, + 854, + 480, + 896 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "portion of the former frame, and the reverse is true when moving backward. Similarly, during left or right rotations, the latter frame should partially overlap with the former frame in the horizontal direction, and during upward or downward rotations, the overlap occurs in the vertical direction. Given that the sampling frequency of visual observations is typically much higher than the agent's motion speed, frames generally exhibit significant overlap.", + "bbox": [ + 511, + 106, + 913, + 204 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Specifically, a perspective transformation is used to model the geometric relationship between frames. Assuming $f_{t}$ is a key-frame, to determine whether $f_{t+1}$ should also be considered a keyframe, keypoints and descriptors are calculated from $f_{t}$ and $f_{t+1}$ using the Oriented FAST and Rotated BRIEF (ORB) algorithm. Next, a feature matching algorithm, such as the Brute-Force Matcher, is applied to match the descriptors between the two frames and the Random Sample Consensus (RANSAC) algorithm is employed to estimate the homography matrix. The overlap ratio between two frames is then computed. If overlap ratio is less than a predefined threshold, it indicates significant visual changes between the frames, and $f_{t+1}$ is marked as a key-frame. Otherwise, the algorithm proceeds to calculate the overlap ratio between $f_{t}$ and $f_{t+2}$ . This process continues until a new key-frame is identified, which then becomes the reference for subsequent frames. 
Considering the effect of viewpoint changes, rotations (both horizontal and vertical) result in larger field-of-view variations, leading to more frames being recorded during these movements. If the indices of the extracted keyframes are denoted as $\\mathbf{f}' = [f_{k_0}, f_{k_1}, \\dots, f_{k_n}]$, the keyframe extraction process can be summarized as:", + "bbox": [ + 511, + 204, + 913, + 479 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf{f}' = \\operatorname{K\\text{-}Extractor}(\\mathbf{f}). \\tag{1}\n$$\n", + "text_format": "latex", + "bbox": [ + 650, + 488, + 913, + 503 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2.2 Embodied Semantic Representation. Since perceptual capability is positively correlated with model size [27, 58, 62], we employ a large-scale VLM to process visual inputs to ensure high-quality perception. The differential information of each key frame is described sequentially. This approach provides two key benefits: 1) The sequential and dynamic processing aligns better with the characteristics of embodied scenarios, where visual observations are continuously generated over time. At each moment, the model should integrate historical semantic representations with the latest visual observations, rapidly updating the semantic understanding of spatial perception. 2) It facilitates the handling of long videos by avoiding the input token limitations that arise when all frames are processed simultaneously by the VLM.", + "bbox": [ + 511, + 512, + 913, + 691 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Specifically, for the first frame, the VLM identifies the objects present in the scene, their attributes, and their spatial locations. For subsequent frames, both the previous frame and the current frame are input into the VLM to extract key semantic representation $s_{k_j}$:", + "bbox": [ + 511, + 691, + 913, + 750 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\ns_{k_j} \\sim \\psi_{\\theta}(s | f_{k_{j-1}}, f_{k_j}; q), \\quad j = 1, 2, \\dots, n, \\tag{2}\n$$\n", + "text_format": "latex", + "bbox": [ + 599, + 756, + 913, + 773 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $s_{k_j}$ consists of three items:", + "bbox": [ + 513, + 780, + 718, + 795 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Action: Inferring the agent's actions based on the changes in visual observations between consecutive frames.", + "- $\\Delta$ Information: Determining changes in the spatial relationships between the agent and known objects, as well as identifying whether new objects appear in the field of view.", + "- $q$-related content: Detecting whether objects or information relevant to the reasoning task appear in the latest field of view." + ], + "bbox": [ + 514, + 797, + 913, + 895 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/b4adaf2d4183da6749bf4525fe2d5c946fa39eb4214e564e23bd8a915e471952.jpg", + "image_caption": [ + "Figure 2: The proposed Embodied-R is a collaborative embodied spatial reasoning framework integrating a Vision-Language Model (VLM) and a Language Model (LM). The separation of perception and reasoning enables us to leverage the perceptual capabilities of large-scale VLMs while training a resource-efficient small-scale LM to activate embodied reasoning through RL.
Notably, we introduce a novel logical consistency reward to guide the LM in producing logically coherent reasoning and answer." + ], + "image_footnote": [], + "bbox": [ + 84, + 103, + 916, + 448 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this way, we can extract spatial semantic representations $\\mathbf{s} = [s_{k_0}, s_{k_1}, \\dots, s_{k_n}]$ from the keyframes $\\mathbf{f}'$.", + "bbox": [ + 83, + 537, + 482, + 568 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3 Small-Scale LM-based Reasoning", + "text_level": 1, + "bbox": [ + 83, + 592, + 393, + 608 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Given semantic perception, we can train a training-friendly small-scale language model capable of performing embodied spatial reasoning. Assuming the small-scale LM is denoted as $\\pi_{\\theta}$, the response $o$ inferred from the model can be expressed as: $o \\sim \\pi_{\\theta}(o \\mid q, s)$.", + "bbox": [ + 81, + 609, + 482, + 666 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our training objective is to ensure that the model adheres to the "think-then-answer" paradigm, where the thinking process is logical, and the answer is correct. We follow DeepSeek-R1-Zero and adopt a computationally efficient RL training strategy, Group Relative Policy Optimization (GRPO). Besides rule-based format and accuracy rewards, we propose a novel reasoning process reward tailored for embodied reasoning tasks to mitigate reward hacking and enhance the logical consistency between the reasoning process and the final answer.", + "bbox": [ + 81, + 666, + 482, + 790 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3.1 Group Relative Policy Optimization. For a given query $q$ and semantic annotation $s$, GRPO generates a group of outputs $\\{o_1, o_2, \\dots, o_G\\}$ using the reference policy $\\pi_{\\mathrm{ref}}$. The reference policy typically refers to the original model not trained via GRPO. The policy model $\\pi_\\theta$ is then updated by optimizing the following objective:", + "bbox": [ + 81, + 811, + 482, + 896 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{J}(\\theta) = \\mathbb{E}_{(q, \\mathbf{s}) \\sim \\mathbb{D}, \\{o_i\\}_{i=1}^{G} \\sim \\pi_{\\mathrm{old}}(o \\mid q, \\mathbf{s})} \\Biggl[ \\frac{1}{G} \\sum_{i=1}^{G} \\biggl( \\min \\Bigl( \\frac{\\pi_{\\theta}(o_i \\mid q, \\mathbf{s})}{\\pi_{\\mathrm{old}}(o_i \\mid q, \\mathbf{s})} A_i, \\operatorname{clip} \\bigl( \\frac{\\pi_{\\theta}(o_i \\mid q, \\mathbf{s})}{\\pi_{\\mathrm{old}}(o_i \\mid q, \\mathbf{s})}, 1 - \\epsilon, 1 + \\epsilon \\bigr) A_i \\Bigr) - \\beta \\mathcal{D}_{\\mathrm{KL}} \\bigl( \\pi_{\\theta} \\,\\|\\, \\pi_{\\mathrm{ref}} \\bigr) \\biggr) \\Biggr], \\tag{3}\n$$\n", + "text_format": "latex", + "bbox": [ + 529, + 571, + 913, + 641 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\epsilon$ and $\\beta$ are hyperparameters, and $\\mathcal{D}_{\\mathrm{KL}}(\\pi_{\\theta} \\| \\pi_{\\mathrm{ref}})$ is the KL divergence penalty: $\\mathcal{D}_{\\mathrm{KL}}(\\pi_{\\theta} \\| \\pi_{\\mathrm{ref}}) = \\frac{\\pi_{\\mathrm{ref}}(o_i \\mid q, \\mathbf{s})}{\\pi_{\\theta}(o_i \\mid q, \\mathbf{s})} - \\log \\frac{\\pi_{\\mathrm{ref}}(o_i \\mid q, \\mathbf{s})}{\\pi_{\\theta}(o_i \\mid q, \\mathbf{s})} - 1$. $A_i$ represents the advantage corresponding to the output $o_i$, calculated from the corresponding rewards $\\{r_1, r_2, \\dots, r_G\\}$: $A_i = \\frac{r_i - \\mathrm{mean}(\\{r_1, r_2, \\dots, r_G\\})}{\\mathrm{std}(\\{r_1, r_2, \\dots, r_G\\})}$.", + "bbox": [ + 513, + 661, + 915, + 729 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3.2 Reward Modeling. Reward modeling is a critical component of RL algorithms, as its design guides the direction of model optimization. We propose three types of rewards: format reward, accuracy reward, and logical consistency reward. These are designed to respectively guide the model to learn the "think-answer" reasoning pattern, accurate embodied spatial reasoning, and logical consistency between reasoning and the answer.", + "bbox": [ + 513, + 743, + 915, + 839 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Format Reward: We aim for the model to output $o_i$ by first producing an embodied reasoning process $p_i$ followed by the final answer $a_i$. The reasoning process and answer are enclosed within <think></think> and <answer></answer> tags, respectively:", + "bbox": [ + 513, + 840, + 913, + 896 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Please assume the role of an agent. Given a question and a series of frames, you should first think about the reasoning process in the mind and then provide the final answer. The reasoning process and answer are enclosed within <think></think> and <answer></answer> tags, respectively, i.e., <think> reasoning process here </think> <answer> answer here </answer>. Ensure that your answer is consistent with and directly derived from your thinking process, maintaining logical coherence between the two sections. The frames represent your egocentric observations from the past to the present. Question: q. Video: f'. Assistant:", + "bbox": [ + 106, + 112, + 464, + 265 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "A regular expression is applied to evaluate whether $o_i$ meets the specified requirements, thereby generating the format reward $r_i'$:", + "bbox": [ + 83, + 280, + 482, + 310 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nr_i^{\\prime} = \\left\\{ \\begin{array}{ll} 1, & \\text{if format is correct;} \\\\ 0, & \\text{if format is incorrect.} \\end{array} \\right. \\tag{4}\n$$\n", + "text_format": "latex", + "bbox": [ + 183, + 316, + 482, + 353 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Accuracy Reward: The accuracy reward $r_i^{\\prime\\prime}$ assesses whether the answer $a_i$ is semantically consistent with the ground truth $g$.
For example, multiple-choice questions typically have precise and unique answers, which can be easily extracted when the response adheres to the specified format.", + "bbox": [ + 81, + 357, + 483, + 426 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nr_i^{\\prime\\prime} = \\left\\{ \\begin{array}{ll} 1, & a_i = g; \\\\ 0, & a_i \\neq g. \\end{array} \\right. \\tag{5}\n$$\n", + "text_format": "latex", + "bbox": [ + 223, + 431, + 482, + 469 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Logical Consistency Reward: When using only the format reward and accuracy reward, we consistently observed hacking behaviors. Specifically, for spatial reasoning tasks where the possible answers are limited (e.g., the relative position of an object with respect to the agent's body), cases arise where an incorrect reasoning process $p_i$ leads to a correct answer $a_i$, which is mistakenly assigned a positive reward. As such cases accumulate, the logical consistency of the model's responses deteriorates. To address this issue, we introduce a simple yet effective process reward. Our goal is to ensure a lower bound on logical consistency, such that the reasoning ability of $\\pi_{\\theta}$ should not degrade below that of the reference model $\\pi_{\\mathrm{ref}}$. Therefore, when the model's answer is correct $(a_i = g)$, we input the question $q$ and reasoning process $p_i$ into the reference model without providing video frames, yielding an answer:", + "bbox": [ + 81, + 474, + 482, + 669 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\na_i^{\\prime} \\sim \\pi_{\\mathrm{ref}}(a \\mid q, p_i). \\tag{6}\n$$\n", + "text_format": "latex", + "bbox": [ + 223, + 675, + 480, + 691 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "If $a_i'$ is consistent with $a_i$, it indicates that the reasoning process can logically lead to the answer; otherwise, it reflects a logical inconsistency between the reasoning process and the answer.", + "bbox": [ + 81, + 696, + 482, + 739 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nr_i^{\\prime\\prime\\prime} = \\left\\{ \\begin{array}{ll} 1, & a_i = a_i^{\\prime} = g; \\\\ 0, & \\text{else.} \\end{array} \\right. \\tag{7}\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 746, + 482, + 781 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Total Reward: The total reward is a linear combination of the three rewards mentioned above:", + "bbox": [ + 83, + 787, + 482, + 814 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nr_i = \\omega_1 r_i^{\\prime} + \\omega_2 r_i^{\\prime\\prime} + \\omega_3 r_i^{\\prime\\prime\\prime}. \\tag{8}\n$$\n", + "text_format": "latex", + "bbox": [ + 200, + 821, + 482, + 839 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 83, + 849, + 218, + 864 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We first provide the details of the experimental setup and then demonstrate the following: quantitative results, qualitative results,", + "bbox": [ + 81, + 867, + 483, + 896 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "and ablation studies.
These correspond to addressing the following three research questions (RQs):", "bbox": [513, 106, 913, 135], "page_idx": 4 }, { "type": "list", "sub_type": "text", "list_items": [ "- RQ1: How does Embodied-R perform compared to existing video-LLMs?", "- RQ2: Has Embodied-R learned slow-thinking?", "- RQ3: What are the contributions of each module?" ], "bbox": [514, 137, 913, 191], "page_idx": 4 }, { "type": "text", "text": "4.1 Experimental Setup", "text_level": 1, "bbox": [514, 205, 720, 222], "page_idx": 4 }, { "type": "text", "text": "4.1.1 Data Preparation. We primarily focus on spatial reasoning problems during motion within three-dimensional physical space to evaluate the effectiveness of our method. For this purpose, we selected two embodied video datasets as the main training and testing sets: VSI-Bench [58], which contains indoor first-person navigation data, and UrbanVideo-Bench [62], which consists of outdoor embodied data captured by drones navigating through aerial spaces. These datasets provide diversity in scenarios by incorporating both outdoor and indoor video data. Based on the content of the tasks, we specifically selected four distinct types of tasks from each dataset, characterized by long spatial reasoning chains and low accuracy. These tasks are formulated as multiple-choice question-answering problems, ensuring determinism in answers to facilitate RL training and allowing direct calculation of accuracy to evaluate performance. Across eight task categories, the dataset covers multiple levels of spatial reasoning, comprising a total of 5,415 QA pairs and 1,492 videos. Additionally, we include two out-of-distribution datasets, EgoSchema [34] and the egocentric task in MVBench [27]. EgoSchema is designed for task-level reasoning from a first-person perspective, with 500 QA pairs and 500 videos available in its fully open-source portion. MVBench encompasses the embodied task of egocentric navigation, comprising 200 QA pairs and 200 corresponding videos. These datasets serve to evaluate the generalization capability of the trained model.", "bbox": [511, 224, 913, 555], "page_idx": 4 }, { "type": "text", "text": "To ensure comprehensive evaluation, we conducted five repeated experiments. The dataset was randomly divided into five equal parts and 5-fold cross-validation was adopted. The final testing results are averaged across the five experiments. Furthermore, we address the issue of potential semantic bias in the datasets. For instance, in action generation tasks, forward movement may inherently have a higher correctness rate than adjusting the gimbal angle, which is a characteristic of the task itself. To prevent the testing performance from being influenced by the model learning the textual distribution rather than truly understanding the spatial information in the video, we implement an additional filtering step for the testing set. Specifically, we train an LLM through supervised fine-tuning using only the textual QA pairs from the training set, without video inputs. If a question in the testing set can be correctly answered by the fine-tuned LLM but not by the original LLM, it indicates semantic bias in that QA pair. 
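The filtering rule just described amounts to a two-model comparison. The sketch below is illustrative only; the `answer()` interface on the text-only fine-tuned LM and its original checkpoint is a hypothetical wrapper, not a real API.

```python
def is_semantically_biased(question: str, gt: str, blind_sft_lm, original_lm) -> bool:
    """A test QA pair is flagged as biased if a text-only fine-tuned LM answers it
    correctly while the original LM does not, i.e. the answer is recoverable from
    textual patterns rather than from the video content."""
    return blind_sft_lm.answer(question) == gt and original_lm.answer(question) != gt

# Keep only unbiased questions in the testing set, e.g.:
# test_set = [qa for qa in test_set
#             if not is_semantically_biased(qa.q, qa.gt, sft_lm, base_lm)]
```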
These biased QA pairs are excluded from the testing set as they fail to accurately assess the spatial reasoning capabilities of models.", "bbox": [511, 556, 913, 805], "page_idx": 4 }, { "type": "text", "text": "4.1.2 Implementation Details. We use Qwen2.5-3B-Instruct [57] as the small-scale LM and Qwen2.5-VL-72B-Instruct [6] as the large-scale VLM. Both training and inference were conducted on 8 NVIDIA A800-SXM4-40GB GPUs, with each RL training run requiring approximately 90 GPU hours. Other key hyperparameters for training are as follows: learning rate: 5e-7, temperature:", "bbox": [513, 811, 916, 896], "page_idx": 4 }, { "type": "table", "img_path": "images/07b7a0b7ff464267e1f94ac8af3cecb76bbbacb0d09b986b837e1427bcd0b037.jpg", "table_caption": [ "Table 1: Accuracy of Embodied-R and baselines on 8 indoor and outdoor embodied spatial reasoning tasks. The baselines include popular proprietary models, state-of-the-art (SOTA) multimodal reasoning models, open-sourced video-large language models, and models fine-tuned on the same training dataset." ], "table_footnote": [], "table_body": "
<table><tr><td>Method</td><td>Avg.</td><td colspan='4'>UrbanVideo-Bench</td><td colspan='4'>VSI-Bench</td></tr>
<tr><td></td><td></td><td>Landmark Position</td><td>Counterfactual</td><td>Progress Evaluation</td><td>Action Generation</td><td>Relative Distance</td><td>Relative Direction</td><td>Route Planning</td><td>Appearance Order</td></tr>
<tr><td>Random</td><td>24.0</td><td>19.7</td><td>25.0</td><td>21.8</td><td>16.4</td><td>25.0</td><td>36.1</td><td>28.3</td><td>25.0</td></tr>
<tr><td colspan='10'>Proprietary Models (API)</td></tr>
<tr><td>Qwen-VL-Max[32f]</td><td>34.1</td><td>44.8</td><td>49.2</td><td>38.8</td><td>29.6</td><td>28.0</td><td>33.3</td><td>29.6</td><td>28.3</td></tr>
<tr><td>GPT-4o[32f]</td><td>35.7</td><td>36.8</td><td>44.7</td><td>34.2</td><td>33.8</td><td>37.0</td><td>41.3</td><td>31.5</td><td>28.5</td></tr>
<tr><td>Gemini-1.5-Flash[1fps]</td><td>38.3</td><td>37.8</td><td>42.4</td><td>43.3</td><td>34.4</td><td>37.7</td><td>41.0</td><td>31.5</td><td>37.8</td></tr>
<tr><td>Gemini-1.5-Pro[1fps]</td><td>39.7</td><td>37.4</td><td>46.2</td><td>38.8</td><td>31.9</td><td>51.3</td><td>46.3</td><td>36.0</td><td>34.6</td></tr>
<tr><td colspan='10'>SOTA Reasoning Models (API)</td></tr>
<tr><td>OpenAI-o1[32f]</td><td>37.2</td><td>34.6</td><td>53.3</td><td>39.1</td><td>28.0</td><td>39.7</td><td>35.8</td><td>52.9</td><td>39.8</td></tr>
<tr><td>Gemini-2.5-Pro[1fps]</td><td>40.8</td><td>40.0</td><td>75.0</td><td>38.7</td><td>23.5</td><td>42.0</td><td>34.5</td><td>52.4</td><td>63.6</td></tr>
<tr><td colspan='10'>Open-source Models</td></tr>
<tr><td>LLaVA-NeXT-Video-7B-hf[32f]</td><td>29.5</td><td>49.5</td><td>20.5</td><td>36.6</td><td>19.2</td><td>25.2</td><td>26.3</td><td>29.9</td><td>24.5</td></tr>
<tr><td>Phi-3.5-vision-instruct[32f]</td><td>29.0</td><td>49.2</td><td>34.8</td><td>33.2</td><td>15.6</td><td>25.4</td><td>26.5</td><td>36.9</td><td>25.2</td></tr>
<tr><td>Kangaroo[64f]</td><td>30.0</td><td>35.5</td><td>42.4</td><td>32.5</td><td>32.4</td><td>25.2</td><td>26.8</td><td>23.5</td><td>24.9</td></tr>
<tr><td>InternVL2-2B[32f]</td><td>24.5</td><td>19.3</td><td>45.5</td><td>29.2</td><td>20.9</td><td>25.1</td><td>25.0</td><td>32.6</td><td>23.9</td></tr>
<tr><td>InternVL2-8B[32f]</td><td>25.5</td><td>23.1</td><td>45.5</td><td>31.5</td><td>21.4</td><td>24.7</td><td>25.7</td><td>28.3</td><td>24.8</td></tr>
<tr><td>InternVL2-40B[32f]</td><td>25.8</td><td>23.2</td><td>41.7</td><td>32.4</td><td>22.3</td><td>24.9</td><td>25.7</td><td>29.4</td><td>24.5</td></tr>
<tr><td>Qwen2.5-VL-3B-Instruct[1fps]</td><td>33.1</td><td>32.1</td><td>47.8</td><td>34.0</td><td>31.0</td><td>27.9</td><td>32.6</td><td>39.0</td><td>38.9</td></tr>
<tr><td>Qwen2.5-VL-7B-Instruct[1fps]</td><td>33.3</td><td>33.3</td><td>21.7</td><td>25.0</td><td>27.8</td><td>35.8</td><td>39.7</td><td>48.8</td><td>38.8</td></tr>
<tr><td>Qwen2.5-VL-72B-Instruct[1fps]</td><td>34.9</td><td>34.7</td><td>34.8</td><td>26.4</td><td>37.7</td><td>40.8</td><td>29.0</td><td>32.5</td><td>43.9</td></tr>
<tr><td colspan='10'>Supervised Fine-Tuning</td></tr>
<tr><td>Qwen2.5-VL-3B-Instruct[1fps]</td><td>41.7</td><td>47.7</td><td>33.4</td><td>34.8</td><td>39.2</td><td>42.6</td><td>42.3</td><td>41.2</td><td>43.9</td></tr>
<tr><td>Qwen2.5-VL-7B-Instruct[1fps]</td><td>45.4</td><td>40.2</td><td>53.4</td><td>38.0</td><td>40.8</td><td>47.8</td><td>46.3</td><td>44.1</td><td>56.1</td></tr>
<tr><td colspan='10'>Proposed Embodied-R</td></tr>
<tr><td>VLM-72B + LLM-3B [≤32f]</td><td>51.1</td><td>55.1</td><td>59.9</td><td>39.7</td><td>47.6</td><td>50.0</td><td>44.3</td><td>36.8</td><td>72.0</td></tr></table>
", + "bbox": [ + 84, + 157, + 598, + 608 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/9c8f658d66e081cf723488b6f76f91ee89abd848c964038024d4a57588ef7cd3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 604, + 157, + 919, + 347 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/bee8e503247e6bfc518e9e846c8a4b872251751a5d9ae2c860f2d80f79ade5f7.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td>GPT-4o</td><td>Qwen2.5-VL-72B</td></tr>
<tr><td>OpenAI-o1</td><td>Qwen2.5-VL-3B</td></tr>
<tr><td>Gemini-1.5-Pro</td><td>Qwen2.5-VL-3B-SFT</td></tr>
<tr><td>InternVL2-40B</td><td>Embodied-R</td></tr></table>
", + "bbox": [ + 617, + 359, + 898, + 416 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/5ff31b3efbfea499b28ba5da6236251848769ad72fedf064dac36cb1cd14bf7b.jpg", + "table_caption": [ + "Table 2: Ablation of Key-Frame Extractor" + ], + "table_footnote": [], + "table_body": "
<table><tr><td></td><td>Avg. Frame</td><td>Acc.</td><td>Training Time</td><td>Inference Time</td></tr>
<tr><td>w/o</td><td>32</td><td>51.1</td><td>127.87 h</td><td>243.68 s</td></tr>
<tr><td>w</td><td>20.7 (↓11.3)</td><td>49.5 (↓1.6)</td><td>111.70 h (↓16.17 h)</td><td>157.55 s (↓86.13 s)</td></tr></table>
", + "bbox": [ + 611, + 450, + 908, + 511 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/d3863b9148a96e8d65b704fc34cde1fe3cea544d324697581654052ba2668f19.jpg", + "table_caption": [ + "Table 3: Ablation of Collaboration." + ], + "table_footnote": [], + "table_body": "
<table><tr><td></td><td>Avg.</td><td>LP</td><td>C</td><td>PE</td><td>AG</td><td>RDist</td><td>RDir</td><td>RP</td><td>AO</td></tr>
<tr><td>w/o</td><td>34.8</td><td>31.8</td><td>45.7</td><td>28.3</td><td>28.1</td><td>41.0</td><td>29.7</td><td>37.5</td><td>46.0</td></tr>
<tr><td>w</td><td>51.1</td><td>55.1</td><td>59.9</td><td>39.7</td><td>47.6</td><td>50.0</td><td>44.3</td><td>36.8</td><td>72.0</td></tr>
<tr><td>Δ</td><td>+16.3</td><td>+23.3</td><td>+14.2</td><td>+11.4</td><td>+19.5</td><td>+9.0</td><td>+14.6</td><td>-0.7</td><td>+26.0</td></tr></table>
", + "bbox": [ + 604, + 547, + 916, + 603 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "1.0, train batch size: 32, rollout size: 8, KL coefficient: 0.001, maximum response length: 2048, input length: 6144. When conducting inference on the test set, the temperature is set to 0.5.", + "bbox": [ + 81, + 626, + 482, + 670 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1.3 Three-Stage Training Schedule. As for the RL training on the LM, we design a three-stage training schedule to achieve a smooth improvement in training performance. The primary distinction between stages lies in the different weight ratios assigned to three types of rewards.", + "bbox": [ + 81, + 680, + 482, + 750 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Stage 1: In epochs 1 and 2, the goal is to guide the model to follow the \"\" output format. At this stage, the weights are set as $\\omega_{1}:\\omega_{2}:\\omega_{3} = 7:3:0$ . Correct format rewards also assist in locating the answer and reduce misjudgment in accuracy. During this phase, the format reward rapidly converges to 1.", + "- Stage 2: In epochs 3 and 4, the focus shifts to improving the accuracy of the model's responses, guiding the model to produce correct reasoning answers. The weights are set as $\\omega_{1}:\\omega_{2}:\\omega_{3} = 3:7:0$ ." + ], + "bbox": [ + 83, + 757, + 482, + 895 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "- Stage 3: In subsequent 5-12 epochs, the aim is to enhance accuracy while simultaneously improving the quality of the \"thinking\" process, ensuring logical consistency between thinking and the answer. The weights are set as $\\omega_{1}:\\omega_{2}:\\omega_{3} = 1:7:2$ .", + "bbox": [ + 514, + 627, + 915, + 683 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 How Does Embodied-R Perform Compared to Existing Video-LLMs?", + "text_level": 1, + "bbox": [ + 513, + 705, + 908, + 739 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To evaluate the effectiveness of the proposed method, in addition to the random baseline, we introduced four categories comprising 17 multimodal large language models capable of processing video inputs:", + "bbox": [ + 513, + 742, + 913, + 797 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Proprietary Models: Cost-effective multimodal models with over 100B parameters, including Qwen-VL-Max [46], GPT-4o [37], Gemini-1.5-Flash [44], and Gemini-1.5-Pro [44].", + "- SOTA Reasoning Models: State-of-the-art reasoning models with the highest performance but significant computational cost, including OpenAI-o1 [38] and Gemini-2.5-Pro [21]." + ], + "bbox": [ + 514, + 811, + 915, + 896 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/c318cdb8f4090e0a3adc8de0a1fd78af3544fb0478b79abbbdcf108a3971a0d2.jpg", + "image_caption": [ + "Figure 3: Case Analysis: Embodied-R has initially developed the ability for slow-thinking: it can think before answering, effectively distinguish spatial relationships, provide structured and organized responses, and integrate information across multiple frames for embodied scene analysis." 
+ ],
"image_footnote": [], "bbox": [106, 114, 493, 359], "page_idx": 6 }, { "type": "image", "img_path": "images/2b9e5f7a41386ace07624dec3ec03897c00121bef7fd942a2b707bf0ce1c5755.jpg", "image_caption": [], "image_footnote": [], "bbox": [500, 114, 885, 359], "page_idx": 6 }, { "type": "image", "img_path": "images/62b0a49ba975743ab2da724498d96bedd02c9a57a3ea5856833650e9fef3dfdb.jpg", "image_caption": [ "Figure 4: Ablation of RL training and comparison to other language models." ], "image_footnote": [], "bbox": [107, 417, 455, 575], "page_idx": 6 }, { "type": "list", "sub_type": "text", "list_items": [ "- Open-Source Models: Popular open-source multimodal models, including LLaVA-NeXT-Video-7B-hf [29], Phi-3.5-vision-instruct [1], the InternVL2 series [11], and the Qwen-VL series [6].", "- Supervised Fine-Tuning (SFT): Considering the scarcity of embodied video tasks, the aforementioned models may lack exposure to relevant data. Therefore, Qwen2.5-VL-3B-Instruct [6] and Qwen2.5-VL-7B-Instruct [6] are fine-tuned for these tasks. The results presented in Table 1 lead to the following conclusions:", "- After undergoing RL training on embodied reasoning tasks, our model significantly outperformed proprietary models as well as OpenAI-o1 and Gemini-2.5-Pro by over $10\\%$. Moreover, it consistently demonstrated leading performance across various tasks. These results highlight the considerable difficulty of embodied reasoning tasks and indicate that current reasoning models lack generalization capability for such spatial reasoning challenges. On the other hand, the findings confirm that the collaborative framework with RL can effectively enhance model reasoning performance in specific domains, especially for tasks that remain poorly solved." ], "bbox": [83, 623, 500, 896], "page_idx": 6 }, { "type": "list", "sub_type": "text", "list_items": [ "- For embodied video reasoning, a highly coupled perception-reasoning problem, the VLM Qwen2.5-VL-72B-Instruct achieved an accuracy of only $34.9\\%$ through direct inference. In contrast, incorporating a small-scale LM improved accuracy to $51.1\\%$. Given limited computational resources for training, the collaborative framework proposed in this study provides an effective solution for balancing model size with hardware constraints.", "- Under similar computational resource limitations, direct fine-tuning is restricted to models with a size of 7B or smaller. However, the perceptual capacity of small-scale VL models imposes a low upper bound on accuracy compared to Embodied-R. Additionally, fine-tuned models lack the capability for slow-thinking." ], "bbox": [514, 421, 916, 602], "page_idx": 6 }, { "type": "text", "text": "4.3 Has Embodied-R Learned Slow-Thinking?", "text_level": 1, "bbox": [514, 626, 898, 642], "page_idx": 6 }, { "type": "text", "text": "Beyond the quantitative results, we aim to explore whether the spatial reasoning capabilities in the output of Embodied-R are improved. 
As illustrated in Figure 3, after RL training, Embodied-R demonstrates the following human-like reasoning patterns:", "bbox": [513, 643, 915, 700], "page_idx": 6 }, { "type": "list", "sub_type": "text", "list_items": [ "- Spatial Relationship Reasoning: Accurately inferring the relative spatial relationship between itself and the surrounding environment.", "- Systematic Analysis: Breaking down problems into components, presenting answers with a \"part-to-whole\" structure, and maintaining clear logical organization.", "- Contextual Integration: Integrating semantic information across different frames to perform comprehensive analysis.", "- Think-Answer Format: Strictly adhering to a structured process of reasoning before outputting the final answer." ], "bbox": [514, 714, 921, 853], "page_idx": 6 }, { "type": "text", "text": "In summary, Embodied-R demonstrates a certain degree of slow-thinking capability in embodied spatial reasoning.", "bbox": [513, 867, 915, 896], "page_idx": 6 }, { "type": "image", "img_path": "images/b72f62ac79b3b4ecd587477fe5880cff8321027484d74e9a4a7c196167a6ee4f.jpg", "image_caption": [], "image_footnote": [], "bbox": [91, 109, 295, 236], "page_idx": 7 }, { "type": "image", "img_path": "images/02393c833bbae8e6c12fcf40fb48a9b3a5d87263d5f9abc11d381f176dd35a56.jpg", "image_caption": [], "image_footnote": [], "bbox": [295, 108, 496, 236], "page_idx": 7 }, { "type": "image", "img_path": "images/77957a7d881be79b7f92f5a86858a4906ac4cfb5dbbbfe27cc3a19d2ebeec4a2.jpg", "image_caption": [], "image_footnote": [], "bbox": [496, 109, 702, 237], "page_idx": 7 }, { "type": "image", "img_path": "images/5a7204fe5ebc0eb56147d5cc28c1831272e0043c1ada6eb035614ccd44df17a2.jpg", "image_caption": [], "image_footnote": [], "bbox": [702, 108, 911, 237], "page_idx": 7 }, { "type": "image", "img_path": "images/bcdca8529b76556187765c9b5a87cb0912a7dc6dfab13b6c798397055f198039.jpg", "image_caption": [ "Figure 5: a-d. The GRPO training process (a: accuracy reward; b: format reward; c: ratio of logical consistency reward to accuracy reward; d: response length of validation set). e. Comparison of accuracy reward curves for RL training of equivalently sized LM and VLM models. f. Model performance before and after integrating logical consistency reward. g. Comparison of generalization performance between models trained with RL and SFT." ], "image_footnote": [], "bbox": [88, 243, 316, 383], "page_idx": 7 }, { "type": "image", "img_path": "images/a38e1a2a0590e90f4eb270befb537b4680adaf74a94c8e1149fbd2b6a5e1a4c2.jpg", "image_caption": [], "image_footnote": [], "bbox": [318, 241, 630, 383], "page_idx": 7 }, { "type": "image", "img_path": "images/5b0471981f3a1f19157135899e7655288f65f2ed892ff00e95c43e9724d27af4.jpg", "image_caption": [], "image_footnote": [], "bbox": [638, 243, 910, 383], "page_idx": 7 }, { "type": "text", "text": "4.4 Contributions of Each Module", "text_level": 1, "bbox": [83, 459, 374, 472], "page_idx": 7 }, { "type": "text", "text": "4.4.1 Ablation of Key-Frame Extractor. The role of the Key-Frame Extractor is to reduce inference time and training time by retaining essential frames and removing redundant ones while maintaining perceptual quality. 
As shown in Table 2, with negligible differences in accuracy, training time is significantly reduced by $8.7\\%$, and single inference time is reduced by approximately one-third.", "bbox": [81, 477, 482, 561], "page_idx": 7 }, { "type": "text", "text": "4.4.2 Ablation of Collaboration. The collaborative framework enables improved reasoning capabilities under limited computational resources for training. With training-free large-scale pretrained VLMs, it only requires training a small-scale LM to achieve enhanced reasoning performance. As shown in Table 3, with identical key-frame inputs and using the same VLM, Qwen2.5-VL-72B-Instruct, the overall accuracy of collaborative inference is 1.5 times that of the standalone VLM.", "bbox": [81, 569, 482, 679], "page_idx": 7 }, { "type": "text", "text": "4.4.3 Ablation of RL Training. RL is central to the LM training in this paper. Without RL training, directly applying the original LM-3B model for reasoning leads to poor performance, as the LM has limited exposure to embodied spatial reasoning data during pretraining. After RL training, the LM achieves significant improvements, with a $27.9\\%$ increase on the UrbanVideo-Bench and a $20.6\\%$ increase on the VSI-Bench benchmarks.", "bbox": [81, 688, 482, 782], "page_idx": 7 }, { "type": "text", "text": "Given that the VLM has already transformed visual inputs into textual representations, we introduced four text-based reasoning models (o3-mini [39], DeepSeek-R1 [24], Qwen-Max [46], Qwen2.5-7B-Instruct [6]) as baselines to further assess the importance of reasoning capability in the embodied spatial task. The results demonstrate a clear positive correlation between the reasoning ability of the model and its accuracy. The strong performance of Embodied-R may not only stem from its familiarity with the data distribution", "bbox": [81, 785, 482, 895], "page_idx": 7 }, { "type": "text", "text": "but also from its synergy with the representations provided by the VLM. Following training, the small-scale LM becomes more attuned to the VLM-generated representations, which translates into enhanced performance on embodied reasoning tasks.", "bbox": [513, 460, 913, 515], "page_idx": 7 }, { "type": "text", "text": "5 Further Exploration", "text_level": 1, "bbox": [514, 523, 709, 539], "page_idx": 7 }, { "type": "text", "text": "Building upon the aforementioned experiments, we further explore four intriguing RQs related to embodied video-based RL training:", "bbox": [513, 542, 911, 570], "page_idx": 7 }, { "type": "list", "sub_type": "text", "list_items": [ "- RQ4: What Is the Relationship Between Inference Ability, Aha Moments, and Response Length?", "- RQ5: Why Not Directly Perform RL Training on VLLMs?", "- RQ6: Is Accuracy+Format Rewards All You Need?", "- RQ7: RL vs SFT When Generalizing to Out-of-Distribution (OOD) Embodied Tasks?"
+ ],
"bbox": [514, 579, 913, 660], "page_idx": 7 }, { "type": "text", "text": "5.1 Relationship Between Inference Ability, Aha Moments, and Response Length?", "text_level": 1, "bbox": [514, 680, 883, 713], "page_idx": 7 }, { "type": "text", "text": "The GRPO training process is illustrated in Figure 5a-d, which correspond to the validation set's accuracy reward, format reward, ratio of logical consistency reward to accuracy reward, and the response length, respectively. Notably, existing pure-text-based reproductions [55, 59] of DeepSeek-R1-Zero models identify inference ability and the \"aha moment\" as key indicators of emergent reasoning capabilities. However, such phenomena are rarely observed in other multimodal reasoning tasks, such as image-based reasoning [10, 33]. This leads us to hypothesize that response length is strongly influenced by the nature of the question itself. For instance, mathematical problems often require multi-step calculations, where increased reasoning length tends to correlate positively with reasoning ability. In contrast, for multimodal reasoning tasks like embodied spatial", "bbox": [511, 715, 913, 895], "page_idx": 7 }, { "type": "text", "text": "reasoning, the LM training process converges toward an optimal range of text output distributions. Concise reasoning patterns may facilitate embodied spatial reasoning. This highlights the versatility of the RL-based post-training method, demonstrating the ability to benefit a wide range of reasoning tasks.", "bbox": [81, 106, 480, 176], "page_idx": 8 }, { "type": "text", "text": "5.2 Why Not Directly Perform RL on VLLMs?", "text_level": 1, "bbox": [83, 186, 465, 203], "page_idx": 8 }, { "type": "text", "text": "We previously attempted direct RL training on the Qwen2.5-VL-3B-Instruct model. As shown in Figure 5e, under similar training parameters and time, the performance of the VLM was notably inferior to that of the LM. Upon convergence, the VLM achieved an accuracy of $43.8\\%$ on the test set, significantly lower than the LM. The limited perceptual capability of the VLM restricts its potential for reasoning improvements. Therefore, under resource-constrained conditions, collaborative inference integrating models of different scales presents a promising solution.", "bbox": [81, 205, 482, 330], "page_idx": 8 }, { "type": "text", "text": "5.3 Is Accuracy+Format Rewards All You Need?", "text_level": 1, "bbox": [83, 343, 480, 358], "page_idx": 8 }, { "type": "text", "text": "According to DeepSeek-R1-Zero, it appears that accuracy and format rewards are enough to guide the model toward correct reasoning. However, during training on our problem, we observed instances of reward hacking, where the model optimizes the answer but the reasoning process leading to that answer is inconsistent with the answer itself. We aim to ensure alignment between the model's reasoning process and its answer, both to enhance generalization and to improve the interpretability of the reasoning process. As shown in Figure 5f, we employ GPT-4o to evaluate the proportion of logically consistent outputs on the test set before and after incorporating a logical consistency reward. This proportion increased from $46.01\\%$ to $99.43\\%$ after the reward was added, demonstrating the value of this approach in addressing embodied spatial multiple-choice reasoning tasks. 
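The consistency evaluation described here amounts to asking a judge model, for each test output, whether the reasoning logically entails the answer, then reporting the positive fraction. A hedged sketch follows; the `judge` callable stands in for a GPT-4o prompt wrapper, which is an assumption rather than a documented API.

```python
def consistency_rate(samples, judge) -> float:
    """Fraction of outputs whose reasoning logically supports the answer.

    samples: iterable of (reasoning, answer) pairs parsed from the
             <think>/<answer> tags of each test output.
    judge:   callable (reasoning, answer) -> bool, e.g. a prompt to a strong
             LLM asking whether the answer follows from the reasoning.
    """
    results = [judge(p, a) for p, a in samples]
    return sum(results) / max(len(results), 1)
```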
Moreover, this reward mechanism could potentially be extended to other reasoning tasks prone to answer-accuracy hacking during training.", "bbox": [81, 362, 482, 583], "page_idx": 8 }, { "type": "text", "text": "5.4 RL vs SFT When Generalizing to", "text_level": 1, "bbox": [83, 595, 369, 609], "page_idx": 8 }, { "type": "text", "text": "Out-of-Distribution (OOD) Embodied Tasks?", "text_level": 1, "bbox": [124, 611, 480, 626], "page_idx": 8 }, { "type": "text", "text": "For small-scale LMs, we aim to explore their generalization performance when trained with SFT instead of RL. To evaluate this, we introduced two OOD datasets: EgoSchema and the egocentric task in MVBench. As discussed in Section 4.1.1, these two OOD datasets differ significantly from the training set in both task content and scene characteristics. The accuracy results are shown in Figure 5g. RL-trained models demonstrate generalization ability across both datasets. On the EgoSchema dataset, the RL-trained language model under the Embodied-R framework even achieves performance comparable to the state-of-the-art multimodal reasoning model, Gemini-2.5-Pro. SFT-trained models showed improvement on EgoSchema but a decline on MVBench. This suggests that slow reasoning, as employed in RL models, could be a promising approach to improve the generalization capabilities even for small-scale models.", "bbox": [81, 630, 482, 824], "page_idx": 8 }, { "type": "text", "text": "6 Conclusion", "text_level": 1, "bbox": [83, 835, 207, 849], "page_idx": 8 }, { "type": "text", "text": "To address embodied spatial reasoning tasks, we propose a collaborative framework that leverages the perceptual capabilities of large-scale VLMs and the reasoning potential of compact LMs.", "bbox": [81, 854, 482, 896], "page_idx": 8 }, { "type": "text", "text": "Through 90 hours of RL training on a 3B LM using 8 NVIDIA A800-SXM4-40GB GPUs, Embodied-R surpasses OpenAI-o1 by $13.9\\%$ and Gemini-2.5-Pro by $10.3\\%$ on the test set. Other key findings include: (1) RL training leads to output-length convergence, aligning with the requirements of the task; (2) the reasoning upper bound of same-scale VLMs trained with RL is significantly lower than that of Embodied-R, due to inherent limitations in perception; (3) the proposed logical consistency reward enhances reasoning quality; and (4) models trained via RL exhibit stronger generalization on out-of-distribution datasets compared to those trained with SFT.", "bbox": [511, 106, 913, 244], "page_idx": 8 }, { "type": "text", "text": "References", "text_level": 1, "bbox": [514, 263, 607, 276], "page_idx": 8 }, { "type": "list", "sub_type": "ref_text", "list_items": [ "[1] Marah Abdin, Jyoti Aneja, Hany Awadalla, Ahmed Awadallah, Ammar Ahmad Awan, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Jianmin Bao, Harkirat Behl, et al. 2024. Phi-3 technical report: A highly capable language model locally on your phone. arXiv preprint arXiv:2404.14219 (2024).", "[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023. GPT-4 technical report. 
arXiv preprint arXiv:2303.08774 (2023).", "[3] Michael Ahn, Debidatta Dwibedi, Chelsea Finn, Montse Gonzalez Arenas, Keerthana Gopalakrishnan, Karol Hausman, Brian Ichter, Alex Irpan, Nikhil Joshi, Ryan Julian, et al. 2024. AutoRT: Embodied foundation models for large scale orchestration of robotic agents. arXiv preprint arXiv:2401.12963 (2024).", "[4] Cameron A Aubin, Benjamin Gorissen, Edoardo Milana, Philip R Buskohl, Nathan Lazarus, Geoffrey A Slipher, Christoph Keplinger, Josh Bongard, Fumiya Iida, Jennifer A Lewis, et al. 2022. Towards enduring autonomous robots via embodied energy. Nature 602, 7897 (2022), 393-402.", "[5] Daichi Azuma, Taiki Miyanishi, Shuhei Kurita, and Motoaki Kawanabe. 2022. ScanQA: 3D question answering for spatial scene understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 19129-19139.", "[6] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. 2025. Qwen2.5-VL technical report. arXiv preprint arXiv:2502.13923 (2025).", "[7] Keshigeyan Chandrasegaran, Agrim Gupta, Lea M Hadzic, Taran Kota, Jimming He, Cristóbal Eyzaguirre, Zane Durante, Manling Li, Jiajun Wu, and Fei-Fei Li. 2024. HourVideo: 1-hour video-language understanding. Advances in Neural Information Processing Systems 37 (2024), 53168-53197.", "[8] Bolei Chen, Jiaxu Kang, Ping Zhong, Yixiong Liang, Yu Sheng, and Jianxin Wang. 2024. Embodied contrastive learning with geometric consistency and behavioral awareness for object navigation. In Proceedings of the 32nd ACM International Conference on Multimedia. 4776-4785.", "[9] Boyuan Chen, Zhuo Xu, Sean Kirmani, Brian Ichter, Dorsa Sadigh, Leonidas Guibas, and Fei Xia. 2024. SpatialVLM: Endowing vision-language models with spatial reasoning capabilities. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 14455-14465.", "[10] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. 2025. R1-V: Reinforcing Super Generalization Ability in Vision-Language Models with Less Than $3. https://github.com/Deep-Agent/R1-V. Accessed: 2025-02-02.", "[11] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. 2024. InternVL: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 24185-24198.", "[12] Sijie Cheng, Kichen Fang, Yangyang Yu, Sicheng Zhou, Bohao Li, Ye Tian, Tingguang Li, Lei Han, and Yang Liu. 2024. VidEgoThink: Assessing egocentric video understanding capabilities for embodied AI. arXiv preprint arXiv:2410.11623 (2024).", "[13] Stephanie Clarke and Judit Miklossy. 1990. Occipital cortex in man: Organization of callosal connections, related myelo- and cytoarchitecture, and putative boundaries of functional visual areas. Journal of Comparative Neurology 298, 2 (1990), 188-214.", "[14] Maël Donoso, Anne GE Collins, and Etienne Koechlin. 2014. Foundations of human reasoning in the prefrontal cortex. Science 344, 6191 (2014), 1481-1486.", "[15] Danny Driess, Fei Xia, Mehdi SM Sajjadi, Corey Lynch, Aakanksha Chowdhery, Ayzaan Wahid, Jonathan Tompson, Quan Vuong, Tianhe Yu, Wenlong Huang, et al. 2023. PaLM-E: An embodied multimodal language model. (2023).", "[16] Hao Fei, Shengqiong Wu, Wei Ji, Hanwang Zhang, Meishan Zhang, Mong-Li Lee, and Wynne Hsu. 2024. 
Video-of-Thought: Step-by-step video reasoning from perception to cognition. arXiv preprint arXiv:2501.03230 (2024).", "[17] Nanyi Fei, Zhiwu Lu, Yizhao Gao, Guoxing Yang, Yuqi Huo, Jingyuan Wen, Haoyu Lu, Ruihua Song, Xin Gao, Tao Xiang, et al. 2022. Towards artificial general intelligence via a multimodal foundation model. Nature Communications" ], "bbox": [517, 279, 913, 893], "page_idx": 8 }, { "type": "list", "sub_type": "ref_text", "list_items": [ "13, 1 (2022), 3094.", "[18] Leonardo Fogassi, Pier Francesco Ferrari, Benno Gesierich, Stefano Rozzi, Fabian Chersi, and Giacomo Rizzolatti. 2005. Parietal lobe: from action organization to intention understanding. Science 308, 5722 (2005), 662-667.", "[19] Lucia Foglia and Robert A Wilson. 2013. Embodied cognition. Wiley Interdisciplinary Reviews: Cognitive Science 4, 3 (2013), 319-325.", "[20] Chen Gao, Baining Zhao, Weichen Zhang, Jinzhu Mao, Jun Zhang, Zhiheng Zheng, Fanhang Man, Jianjie Fang, Zile Zhou, Jinqiang Cui, et al. 2024. EmbodiedCity: A Benchmark Platform for Embodied Agent in Real-world City Environment. arXiv preprint arXiv:2410.09604 (2024).", "[21] Google. 2024. Gemini API. https://ai.google.dev/gemini-api. Accessed: 2025-04-12.", "[22] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, et al. 2022. Ego4D: Around the world in 3,000 hours of egocentric video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 18995-19012.", "[23] Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. 2025. rStar-Math: Small LLMs Can Master Math Reasoning with Self-Evolved Deep Thinking. arXiv preprint arXiv:2501.04519 (2025).", "[24] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. DeepSeek-R1: Incentivizing reasoning capability in LLMs via reinforcement learning. arXiv preprint arXiv:2501.12948 (2025).", "[25] Shima Imani, Liang Du, and Harsh Shrivastava. 2023. MathPrompter: Mathematical reasoning using large language models. arXiv preprint arXiv:2303.05398 (2023).", "[26] James Intriligator and Patrick Cavanagh. 2001. The spatial resolution of visual attention. Cognitive Psychology 43, 3 (2001), 171-216.", "[27] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. 2024. MVBench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 22195-22206.", "[28] Tianlin Li, Qian Liu, Tianyu Pang, Chao Du, Qing Guo, Yang Liu, and Min Lin. 2024. Purifying large language models by assembling a small language model. arXiv preprint arXiv:2402.14845 (2024).", "[29] Bin Lin, Yang Ye, Bin Zhu, Jiaxi Cui, Munan Ning, Peng Jin, and Li Yuan. 2023. Video-LLaVA: Learning united visual representation by alignment before projection. arXiv preprint arXiv:2311.10122 (2023).", "[30] Fangyu Liu, Guy Emerson, and Nigel Collier. 2023. Visual spatial reasoning. Transactions of the Association for Computational Linguistics 11 (2023), 635-651.", "[31] Hongbin Liu, Yongze Zhao, Peng Dong, Xiuyi Guo, and Yilin Wang. 2024. IOFTracker: A Two-Stage Multiple Targets Tracking Method Using Spatial-Temporal Fusion Algorithm. 
Applied Sciences 15, 1 (2024), 107.", "[32] Yang Liu, Weixing Chen, Yongjie Bai, Xiaodan Liang, Guanbin Li, Wen Gao, and Liang Lin. 2024. Aligning cyber space with physical world: A comprehensive survey on embodied AI. arXiv preprint arXiv:2407.06886 (2024).", "[33] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. 2025. Visual-RFT: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785 (2025).", "[34] Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. 2023. EgoSchema: A diagnostic benchmark for very long-form video language understanding. Advances in Neural Information Processing Systems 36 (2023), 46212-46244.", "[35] Yingqian Min, Zhipeng Chen, Jinhao Jiang, Jie Chen, Jia Deng, Yiwen Hu, Yiru Tang, Jiapeng Wang, Xiaoxue Cheng, Huatong Song, et al. 2024. Imitate, explore, and self-improve: A reproduction report on slow-thinking reasoning systems. arXiv preprint arXiv:2412.09413 (2024).", "[36] Yao Mu, Qinglong Zhang, Mengkang Hu, Wenhai Wang, Mingyu Ding, Jun Jin, Bin Wang, Jifeng Dai, Yu Qiao, and Ping Luo. 2023. EmbodiedGPT: Vision-language pre-training via embodied chain of thought. Advances in Neural Information Processing Systems 36 (2023), 25081-25094.", "[37] OpenAI. 2024. GPT-4o API. https://openai.com/api/. Accessed: 2025-04-12.", "[38] OpenAI. 2024. Learning to Reason with LLMs. https://openai.com/index/learning-to-reason-with-llms/ Accessed: 2025-03-04.", "[39] OpenAI. 2025. OpenAI o3-mini. https://openai.com/index/openai-o3-mini/ Accessed: 2025-04-15.", "[40] Amrith Setlur, Saurabh Garg, Xinyang Geng, Naman Garg, Virginia Smith, and Aviral Kumar. 2025. RL on incorrect synthetic data scales the efficiency of LLM math reasoning by eight-fold. Advances in Neural Information Processing Systems 37 (2025), 43000-43031.", "[41] Dhruv Shah, Blazej Osinski, Sergey Levine, et al. 2023. LM-Nav: Robotic navigation with large pre-trained models of language, vision, and action. In Conference on Robot Learning. PMLR, 492–504.", "[42] Alessandro Suglia, Claudio Greco, Katie Baker, Jose L Part, Ioannis Papaioannou, Arash Eshghi, Ioannis Konstas, and Oliver Lemon. 2024. AlanaVLM: A multimodal embodied AI foundation model for egocentric video understanding. arXiv preprint arXiv:2406.13807 (2024)." ], "bbox": [86, 109, 480, 893], "page_idx": 9 }, { "type": "list", "sub_type": "ref_text", "list_items": [ "[43] Guangzhi Sun, Yudong Yang, Jimin Zhuang, Changli Tang, Yixuan Li, Wei Li, Zejun Ma, and Chao Zhang. 2025. video-SALMONN-o1: Reasoning-enhanced Audio-visual Large Language Model. arXiv preprint arXiv:2502.11775 (2025).", "[44] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. 2023. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805 (2023).", "[45] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. 2025. Kimi k1.5: Scaling reinforcement learning with LLMs. arXiv preprint arXiv:2501.12599 (2025).", "[46] Qwen Team. 2024. Qwen-VL-Max. https://qwenlm.github.io/blog/qwen-vl-max/. Accessed: 2025-04-12.", "[47] Qwen Team. 2024. QwQ: Reflect Deeply on the Boundaries of the Unknown. 
https://qwenlm.github.io/blog/qwq-32b-preview/", "[48] Omkar Thawakar, Dinura Dissanayake, Ketan More, Ritesh Thawkar, Ahmed Heakl, Noor Ahsan, Yuhao Li, Mohammed Zumri, Jean Lahoud, Rao Muhammad Anwer, et al. 2025. LlamaV-o1: Rethinking step-by-step visual reasoning in LLMs. arXiv preprint arXiv:2501.06186 (2025).", "[49] Dennis Ulmer, Martin Gubri, Hwaran Lee, Sangdoo Yun, and Seong Joon Oh. 2024. Calibrating large language models using their generations only. arXiv preprint arXiv:2403.05973 (2024).", "[50] Fali Wang, Zhiwei Zhang, Xianren Zhang, Zongyu Wu, Tzuhao Mo, Qiuhao Lu, Wanjing Wang, Rui Li, Junjie Xu, Xianfeng Tang, et al. 2024. A comprehensive survey of small language models in the era of large language models: Techniques, enhancements, applications, collaboration with LLMs, and trustworthiness. arXiv preprint arXiv:2411.03350 (2024).", "[51] Jiayu Wang, Yifei Ming, Zhenmei Shi, Vibhav Vineet, Xin Wang, Sharon Li, and Neel Joshi. 2024. Is a picture worth a thousand words? Delving into spatial reasoning for vision language models. Advances in Neural Information Processing Systems 37 (2024), 75392-75421.", "[52] Tai Wang, Xiaohan Mao, Chenming Zhu, Runsen Xu, Ruiyuan Lyu, Peisen Li, Xiao Chen, Wenwei Zhang, Kai Chen, Tianfan Xue, et al. 2024. EmbodiedScan: A holistic multi-modal 3D perception suite towards embodied AI. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 1975-1976.", "[53] Zhecan Wang, Garrett Bingham, Adams Wei Yu, Quoc V Le, Thang Luong, and Golnaz Ghiasi. 2024. HaloQuest: A visual hallucination dataset for advancing multimodal reasoning. In European Conference on Computer Vision. Springer, 288-304.", "[54] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in Neural Information Processing Systems 35 (2022), 24824-24837.", "[55] Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. 2025. Logic-RL: Unleashing LLM Reasoning with Rule-Based Reinforcement Learning. arXiv preprint arXiv:2502.14768 (2025).", "[56] Cheng Xu, Xiaofeng Hou, Jiacheng Liu, Chao Li, Tianhao Huang, Xiaozhi Zhu, Mo Niu, Lingyu Sun, Peng Tang, Tongqiao Xu, et al. 2023. MMBench: Benchmarking end-to-end multi-modal DNNs and understanding their hardware-software implications. In 2023 IEEE International Symposium on Workload Characterization (IISWC). IEEE, 154-166.", "[57] An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. 2024. Qwen2.5-Math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122 (2024).", "[58] Jihan Yang, Shusheng Yang, Anjali W Gupta, Rilyn Han, Li Fei-Fei, and Saining Xie. 2024. Thinking in space: How multimodal large language models see, remember, and recall spaces. arXiv preprint arXiv:2412.14171 (2024).", "[59] Weihao Zeng, Yuzhen Huang, Wei Liu, Keqing He, Qian Liu, Zejun Ma, and Junxian He. 2025. 7B Model and 8K Examples: Emerging Reasoning with Reinforcement Learning is Both Effective and Efficient. https://hkust-nlp.notion.site/simplerl-reason. Notion Blog.", "[60] Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. 2025. ReST-MCTS*: LLM self-training via process reward guided tree search. 
Advances in Neural Information Processing Systems 37 (2025), 64735-64772.", "[61] Yiming Zhang, Nicholas Carlini, and Daphne Ippolito. 2023. Effective prompt extraction from language models. arXiv preprint arXiv:2307.06865 (2023).", "[62] Baining Zhao, Jianjie Fang, Zichao Dai, Ziyou Wang, Jirong Zha, Weichen Zhang, Chen Gao, Yue Wang, Jinqiang Cui, Xinlei Chen, and Yong Li. 2025. UrbanVideo-Bench: Benchmarking Vision-Language Models on Embodied Intelligence with Video Data in Urban Spaces. arXiv:2503.06157 [cs.CV] https://arxiv.org/abs/2503.06157", "[63] Theodore Zhao, Mu Wei, J Samuel Preston, and Hoifung Poon. 2023. Automatic Calibration and Error Correction for Generative Large Language Models via Pareto Optimal Self-Supervision. (2023).", "[64] Karl Zilles and Katrin Amunts. 2010. Centenary of Brodmann's map—conception and fate. Nature Reviews Neuroscience 11, 2 (2010), 139-145." ], "bbox": [517, 109, 911, 893], "page_idx": 9 }, { "type": "text", "text": "A Appendix", "text_level": 1, "bbox": [83, 104, 200, 122], "page_idx": 10 }, { "type": "text", "text": "A.1 Dataset Introduction", "text_level": 1, "bbox": [83, 125, 302, 140], "page_idx": 10 }, { "type": "text", "text": "UrbanVideo-Bench: UrbanVideo-Bench is one of the training and testing datasets designed for embodied reasoning (Embodied-R). This benchmark was proposed by Tsinghua University in February 2025. It captures two embodied characteristics of urban environments: complex urban scenes featuring dynamic and static elements, and unique aerial navigation scenarios. The dataset consists of 4 categories and 16 tasks, aimed at evaluating Video-LLMs in terms of recall, perception, reasoning, and navigation capabilities. In our paper, we focus on 4 of these complex tasks for reinforcement learning on video: Landmark Position, Counterfactual Reasoning, Progress Evaluation, and Action Generation, which represent challenging embodied outdoor tasks.", "bbox": [81, 143, 482, 309], "page_idx": 10 }, { "type": "text", "text": "VSI-Bench: VSI-Bench is another training and testing dataset for embodied reasoning (Embodied-R). Proposed by Fei-Fei Li's team at Stanford in December 2024, this benchmark provides high-quality evaluation metrics for assessing the 3D, video-based, visual-spatial intelligence of multimodal large language models (MLLMs). The dataset comprises 2 categories and 8 tasks designed to evaluate key aspects of spatial reasoning. In our paper, we focus on 4 tasks for reinforcement learning on video: Relative Distance, Relative Direction, Route Planning, and Appearance Order, all of which are categorized as challenging embodied indoor tasks.", "bbox": [81, 310, 482, 448], "page_idx": 10 }, { "type": "text", "text": "EgoSchema: EgoSchema is one of the Out-of-Distribution (OOD) datasets utilized to evaluate the generalization capability of our model. This dataset is specifically designed as a long-form video question-answering benchmark, aimed at assessing modern vision and language systems' ability to understand and reason over extended video content. It provides a rigorous evaluation framework for long video understanding tasks.", "bbox": [81, 449, 482, 545], "page_idx": 10 }, { "type": "text", "text": "MVBench: MVBench is another Out-of-Distribution (OOD) dataset employed to test the generalization capability of our model. 
MVBench consists of 20 complex video tasks, offering a comprehensive benchmark for evaluating the video understanding capabilities of existing multimodal models. This dataset is designed to address diverse and challenging scenarios in video-based reasoning.", "bbox": [81, 545, 482, 628], "page_idx": 10 }, { "type": "text", "text": "A.2 Details of Key-Frame Extractor", "text_level": 1, "bbox": [83, 640, 383, 655], "page_idx": 10 }, { "type": "text", "text": "The goal of key-frame extraction is to ensure sufficient information gain between frames while maintaining a certain degree of overlap. The specific process is as follows:", "bbox": [81, 657, 482, 700], "page_idx": 10 }, { "type": "text", "text": "Step 1: A perspective transformation is used to model the geometric relationship between frames. Assuming $f_{t}$ is a key-frame, to determine whether $f_{t+1}$ should also be considered a key-frame, keypoints and descriptors are calculated from $f_{t}$ and $f_{t+1}$ using the Oriented FAST and Rotated BRIEF (ORB) algorithm:", "bbox": [81, 700, 482, 768], "page_idx": 10 }, { "type": "equation", "text": "\n$$\n\\mathrm{Keypoints}_{t}, \\mathrm{Descriptors}_{t} = \\mathrm{ORB}(f_{t}), \\tag{9}\n$$\n", "text_format": "latex", "bbox": [173, 773, 482, 789], "page_idx": 10 }, { "type": "equation", "text": "\n$$\n\\mathrm{Keypoints}_{t+1}, \\mathrm{Descriptors}_{t+1} = \\mathrm{ORB}(f_{t+1}). \\tag{10}\n$$\n", "text_format": "latex", "bbox": [153, 792, 480, 808], "page_idx": 10 }, { "type": "text", "text": "Next, a feature matching algorithm, such as the Brute-Force Matcher, is applied to match the descriptors between the two frames, identifying corresponding keypoint pairs $\\mathbf{l}_t^{\\mathrm{key}}$ and $\\mathbf{l}_{t+1}^{\\mathrm{key}}$. Using the matched keypoint pairs, the Random Sample Consensus (RANSAC) algorithm is employed to estimate the homography matrix $\\mathbf{M}$, which maps the content of $f_{t+1}$ to the coordinate space of $f_t$.", "bbox": [81, 810, 482, 896], "page_idx": 10 }, { "type": "text", "text": "Step 2: The overlap ratio between two frames is then computed. Assuming the size of each video frame is $w \\times h$, for frames $f_{t}$ and $f_{t+1}$: $\\mathbf{l}_{t} = \\{ [0,0], [w,0], [w,h], [0,h] \\}$ represents the four corner points of $f_{t}$; $\\mathbf{l}_{t+1} = \\{ [0,0], [w,0], [w,h], [0,h] \\}$ represents the four corner points of $f_{t+1}$. Using the homography matrix $\\mathbf{M}$, the corner points $\\mathbf{l}_{t+1}$ of $f_{t+1}$ are transformed into the coordinate space of $f_{t}$: $\\mathbf{l}_{t+1,i}' = \\mathbf{M} \\cdot \\mathbf{l}_{t+1,i}$, where $\\mathbf{l}_{t+1,i} = [x,y,1]^T$ represents the corner points of $f_{t+1}$ in homogeneous coordinates, and $\\mathbf{l}_{t+1,i}' = [x',y',w']^T$ represents the transformed corner points. The transformed points are further normalized to recover 2D coordinates, resulting in a quadrilateral representing $f_{t+1}$ in $f_{t}$'s space. In $f_{t}$'s coordinate space, there are two polygons: Polygon $L_{t}$ is defined by the corner points $\\mathbf{l}_{t}$ of $f_{t}$; Polygon $L_{t+1}'$ is defined by the transformed corner points $\\mathbf{l}_{t+1}'$. 
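Steps 1 and 2 map directly onto standard OpenCV calls, as in the sketch below (the overlap ratio it returns corresponds to Eq. (11) defined next). This is an assumption-laden illustration, not the authors' code: it uses default ORB settings, a RANSAC reprojection threshold of 5.0, Shapely for the polygon intersection, and takes Area_total to be the frame area w*h.

```python
import cv2
import numpy as np
from shapely.geometry import Polygon  # polygon intersection for the overlap step

def overlap_ratio(f_t: np.ndarray, f_t1: np.ndarray) -> float:
    """Overlap c between key-frame f_t and candidate f_{t+1} (uint8 images)."""
    orb = cv2.ORB_create()
    kp_t, des_t = orb.detectAndCompute(f_t, None)      # Eq. (9)
    kp_t1, des_t1 = orb.detectAndCompute(f_t1, None)   # Eq. (10)
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = matcher.match(des_t1, des_t)             # brute-force descriptor matching
    src = np.float32([kp_t1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    dst = np.float32([kp_t[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    M, _ = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)  # RANSAC estimate of M
    h, w = f_t.shape[:2]
    corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]]).reshape(-1, 1, 2)
    warped = cv2.perspectiveTransform(corners, M).reshape(-1, 2)  # l'_{t+1}
    inter = Polygon(corners.reshape(-1, 2)).intersection(Polygon(warped)).area
    return inter / float(w * h)  # Eq. (11), assuming Area_total = w * h

# f_{t+1} is promoted to a key-frame when overlap_ratio(f_t, f_t1) < epsilon.
```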
Thus, the overlap ratio $c$ is defined as:", "bbox": [511, 106, 913, 305], "page_idx": 10 }, { "type": "equation", "text": "\n$$\nc = \\frac{\\mathrm{Area}\\left(L_{t} \\cap L_{t+1}^{\\prime}\\right)}{\\mathrm{Area}_{\\mathrm{total}}}. \\tag{11}\n$$\n", "text_format": "latex", "bbox": [647, 314, 911, 347], "page_idx": 10 }, { "type": "text", "text": "If $c$ is less than a predefined threshold $\\varepsilon$, it indicates significant visual changes between the frames, and $f_{t+1}$ is marked as a key-frame. Otherwise, the algorithm proceeds to calculate the overlap ratio between $f_t$ and $f_{t+2}$. This process continues until a new key-frame is identified, which then becomes the reference for subsequent frames. Considering the effect of viewpoint changes, rotations (both horizontal and vertical) result in larger field-of-view variations, leading to more frames being recorded during these movements. If the indices of the extracted key-frames are denoted as $\\mathbf{f}' = [f_{k_0}, f_{k_1}, \\dots, f_{k_n}]$, the key-frame extraction process can be summarized as:", "bbox": [511, 354, 913, 507], "page_idx": 10 }, { "type": "equation", "text": "\n$$\n\\mathbf{f}^{\\prime} = \\text{K-Extractor}(\\mathbf{f}). \\tag{12}\n$$\n", "text_format": "latex", "bbox": [650, 515, 911, 529], "page_idx": 10 }, { "type": "text", "text": "A.3 Details of Data Preparation", "text_level": 1, "bbox": [514, 545, 784, 561], "page_idx": 10 }, { "type": "text", "text": "A.3.1 Task Selection Criteria. In our study, we carefully selected specific tasks that emphasize spatial reasoning capabilities during motion within three-dimensional physical space. The selection process was guided by several key considerations:", "bbox": [511, 563, 911, 618], "page_idx": 10 }, { "type": "text", "text": "Focus on Reasoning Processes: We prioritized tasks that require deep cognitive processing rather than simple recognition or recall. As highlighted in the main text, embodied spatial reasoning involves complex spatio-temporal relationships where agents must discover object associations across frames and extract task-relevant semantics. For instance, navigation tasks require agents to infer their location from historical observations, construct mental maps, develop high-level plans, and determine specific actions—processes that demand sophisticated reasoning capabilities.", "bbox": [511, 619, 913, 743], "page_idx": 10 }, { "type": "text", "text": "Diversity in Spatial Contexts: To ensure comprehensive evaluation, we selected tasks from both indoor (VSI-Bench) and outdoor (UrbanVideo-Bench) environments, providing diverse spatial contexts that test different aspects of embodied reasoning. This diversity is crucial for evaluating the generalizability of our approach across varying spatial scales and environmental complexities.", "bbox": [511, 743, 913, 825], "page_idx": 10 }, { "type": "text", "text": "Emphasis on Long Reasoning Chains: We specifically targeted tasks characterized by long spatial reasoning chains and historically low accuracy rates. 
These challenging tasks better demonstrate the value of our \"slow thinking\" approach, which encourages thorough reasoning before generating responses—similar to how", + "bbox": [ + 511, + 827, + 913, + 896 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/7ccac7711d85d0b54aad2b7e8ab68869f72c239a6fc670904f642d5397345cc2.jpg", + "table_caption": [ + "Table 4: Hyperparameters used in reinforcement learning training of Embodied-R." + ], + "table_footnote": [], + "table_body": "
<table><tr><td>Hyperparameter</td><td>Value</td></tr>
<tr><td>Optimizer</td><td>AdamW</td></tr>
<tr><td>Learning Rate</td><td>5e-7</td></tr>
<tr><td>Temperature</td><td>1.0</td></tr>
<tr><td>Train Batch Size</td><td>32</td></tr>
<tr><td>Rollout Size</td><td>8</td></tr>
<tr><td>KL Coefficient</td><td>0.001</td></tr>
<tr><td>Maximum Response Length</td><td>2048</td></tr>
<tr><td>Input Length</td><td>6144</td></tr>
<tr><td>Training Epochs</td><td>12</td></tr></table>
", + "bbox": [ + 156, + 145, + 410, + 299 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "recent advances in mathematical and scientific reasoning have benefited from reinforcement learning techniques.", + "bbox": [ + 81, + 314, + 482, + 340 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Deterministic Evaluation: All selected tasks were formulated as multiple-choice question-answering problems to ensure determinism in answers, facilitating both RL training and direct calculation of accuracy for performance evaluation.", + "bbox": [ + 81, + 342, + 482, + 398 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A.3.2 Question Filtering Methodology. To ensure the quality and validity of our dataset, we implemented a rigorous question filtering process:", + "bbox": [ + 81, + 404, + 480, + 445 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Blind Testing Filter: We first evaluated questions using an untrained 7B language model without video input (blind selection). Questions that could be correctly answered without visual information were identified as potentially problematic, as they might rely more on textual patterns or common knowledge rather than genuine spatial reasoning based on video content.", + "bbox": [ + 81, + 446, + 482, + 527 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "SFT-based Filtering: After conducting supervised fine-tuning (SFT) without video inputs, we analyzed which question types", + "bbox": [ + 81, + 529, + 482, + 558 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "showed significant improvement in accuracy. Categories where the model's performance increased substantially without visual information were flagged for removal, as this indicated strong correlations between question text and answers that could be exploited without actual spatial reasoning.", + "bbox": [ + 511, + 106, + 913, + 175 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Correlation Analysis: We specifically eliminated question types where:", + "bbox": [ + 513, + 176, + 913, + 202 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The model could achieve high accuracy without accessing video content", + "- Performance improved dramatically after text-only SFT training", + "- Question-answer pairs exhibited strong textual patterns that could be exploited without spatial understanding" + ], + "bbox": [ + 545, + 205, + 911, + 289 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "This filtering methodology ensured that our final dataset genuinely tests embodied spatial reasoning capabilities rather than linguistic pattern matching or prior knowledge exploitation. By removing questions with strong text-answer correlations, we created a more challenging and valid benchmark that requires models to truly understand spatial relationships from video content.", + "bbox": [ + 513, + 291, + 913, + 375 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A.4 RL Hyperparameters", + "text_level": 1, + "bbox": [ + 514, + 386, + 733, + 402 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The reinforcement learning (RL) training of Embodied-R requires careful hyperparameter tuning to balance computational efficiency with model performance. We conducted extensive experiments to determine the optimal configuration for our collaborative framework. The key hyperparameters used in our RL training process are summarized in Table 4. 
These settings were selected to ensure stable training while maximizing the model's embodied reasoning capabilities. Notably, we used a relatively small learning rate (5e-7) to prevent catastrophic forgetting and a moderate KL coefficient (0.001) to maintain proximity to the reference model while allowing sufficient exploration.", + "bbox": [ + 511, + 404, + 913, + 556 + ], + "page_idx": 11 + } +] \ No newline at end of file diff --git a/data/2025/2504_12xxx/2504.12680/04e3b704-b5e8-4635-9499-209e4f014c85_model.json b/data/2025/2504_12xxx/2504.12680/04e3b704-b5e8-4635-9499-209e4f014c85_model.json new file mode 100644 index 0000000000000000000000000000000000000000..edef8dc3efea657c3df8abb2c26eff9fbe170f2a --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/04e3b704-b5e8-4635-9499-209e4f014c85_model.json @@ -0,0 +1,3425 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.271, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.12680v1 [cs.AI] 17 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.149, + 0.101, + 0.851, + 0.178 + ], + "angle": 0, + "content": "Embodied-R: Collaborative Framework for Activating Embodied Spatial Reasoning in Foundation Models via Reinforcement Learning" + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.189, + 0.813, + 0.226 + ], + "angle": 0, + "content": "Baining Zhao*, Ziyou Wang*, Jianjie Fang*, Chen Gao†, Fanghang Man, Jinqiang Cui, Xin Wang, Xinlei Chen†, Yong Li, Wenwu Zhu" + }, + { + "type": "text", + "bbox": [ + 0.417, + 0.227, + 0.583, + 0.244 + ], + "angle": 0, + "content": "Tsinghua University" + }, + { + "type": "image", + "bbox": [ + 0.336, + 0.244, + 0.355, + 0.259 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.36, + 0.245, + 0.455, + 0.261 + ], + "angle": 0, + "content": "Project Page" + }, + { + "type": "image", + "bbox": [ + 0.601, + 0.243, + 0.621, + 0.258 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.624, + 0.245, + 0.664, + 0.259 + ], + "angle": 0, + "content": "Code" + }, + { + "type": "title", + "bbox": [ + 0.275, + 0.284, + 0.312, + 0.295 + ], + "angle": 0, + "content": "Tasks" + }, + { + "type": "image", + "bbox": [ + 0.111, + 0.301, + 0.302, + 0.379 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.305, + 0.303, + 0.387, + 0.311 + ], + "angle": 0, + "content": "① Landmark Position" + }, + { + "type": "text", + "bbox": [ + 0.318, + 0.312, + 0.459, + 0.318 + ], + "angle": 0, + "content": "what is your current position relative to [landmark] in" + }, + { + "type": "text", + "bbox": [ + 0.318, + 0.318, + 0.382, + 0.323 + ], + "angle": 0, + "content": "[navigation instruction]" + }, + { + "type": "text", + "bbox": [ + 0.306, + 0.328, + 0.413, + 0.336 + ], + "angle": 0, + "content": "② Counterfactual Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.306, + 0.336, + 0.479, + 0.342 + ], + "angle": 0, + "content": "Can you still reach destination if moving in another direction?" 
+ },
  { "type": "text", "bbox": [0.306, 0.346, 0.391, 0.354], "angle": 0, "content": "③ Progress Evaluation" },
  { "type": "text", "bbox": [0.306, 0.354, 0.447, 0.362], "angle": 0, "content": "Which step of the navigation is currently being performed?" },
  { "type": "text", "bbox": [0.306, 0.371, 0.386, 0.379], "angle": 0, "content": "④ Action Generation" },
  { "type": "text", "bbox": [0.306, 0.38, 0.463, 0.386], "angle": 0, "content": "What is your next action given [navigation instruction]?" },
  { "type": "text", "bbox": [0.306, 0.396, 0.382, 0.404], "angle": 0, "content": "⑤ Relative Distance" },
  { "type": "text", "bbox": [0.318, 0.404, 0.425, 0.411], "angle": 0, "content": "Which object is the closest to [object A]?" },
  { "type": "text", "bbox": [0.306, 0.416, 0.384, 0.423], "angle": 0, "content": "⑥ Relative Direction" },
  { "type": "text", "bbox": [0.318, 0.423, 0.464, 0.43], "angle": 0, "content": "If you are standing by [object A] and facing [object B], is" },
  { "type": "text", "bbox": [0.318, 0.43, 0.425, 0.436], "angle": 0, "content": "the [object C] to your left, right, or back?" },
  { "type": "text", "bbox": [0.306, 0.44, 0.374, 0.447], "angle": 0, "content": "⑦ Route Planning" },
  { "type": "text", "bbox": [0.318, 0.448, 0.399, 0.454], "angle": 0, "content": "How to navigate to [object A]?" },
  { "type": "text", "bbox": [0.306, 0.459, 0.387, 0.466], "angle": 0, "content": "⑧ Appearance Order" },
  { "type": "text", "bbox": [0.306, 0.466, 0.468, 0.473], "angle": 0, "content": "What will be the first-time appearance order of [object A," },
  { "type": "text", "bbox": [0.318, 0.474, 0.412, 0.48], "angle": 0, "content": "object B, object C] in your memory?" },
  { "type": "text", "bbox": [0.318, 0.485, 0.333, 0.49], "angle": 0, "content": "..." },
  { "type": "image", "bbox": [0.478, 0.341, 0.57, 0.426], "angle": 0, "content": null },
  { "type": "image_caption", "bbox": [0.485, 0.44, 0.565, 0.451], "angle": 0, "content": "Embodied-R" },
  { "type": "title", "bbox": [0.699, 0.284, 0.769, 0.297], "angle": 0, "content": "Reasoning" },
  { "type": "image", "bbox": [0.583, 0.298, 0.886, 0.321], "angle": 0, "content": null },
  { "type": "text", "bbox": [0.585, 0.324, 0.867, 0.347], "angle": 0, "content": "[Example] Question: Navigation Instruction given at initial position: [Observe around, then fly towards the road, then turn left and land on the roof of the building on the left]. What are you doing now?" },
  { "type": "text", "bbox": [0.585, 0.348, 0.84, 0.356], "angle": 0, "content": "Choose: A. I look around the square area. B. I fly towards the road." },
  { "type": "text", "bbox": [0.618, 0.357, 0.835, 0.364], "angle": 0, "content": "C. I turn left and land on the roof of the building on the left." },
  { "type": "text", "bbox": [0.618, 0.365, 0.78, 0.372], "angle": 0, "content": "D. I fly over the park. E. I land."
+ }, + { + "type": "text", + "bbox": [ + 0.591, + 0.376, + 0.622, + 0.389 + ], + "angle": 0, + "content": "E.I land" + }, + { + "type": "image", + "bbox": [ + 0.63, + 0.376, + 0.65, + 0.392 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.665, + 0.375, + 0.73, + 0.383 + ], + "angle": 0, + "content": "Fast-Thinking" + }, + { + "type": "text", + "bbox": [ + 0.802, + 0.391, + 0.871, + 0.4 + ], + "angle": 0, + "content": "Slow-Thinking" + }, + { + "type": "text", + "bbox": [ + 0.585, + 0.403, + 0.868, + 0.41 + ], + "angle": 0, + "content": "Think: Let's analyze the event sequence and the instruction step by step:" + }, + { + "type": "text", + "bbox": [ + 0.584, + 0.411, + 0.807, + 0.418 + ], + "angle": 0, + "content": "1. Initial Position: I start by observing around the square area." + }, + { + "type": "text", + "bbox": [ + 0.584, + 0.419, + 0.786, + 0.426 + ], + "angle": 0, + "content": "2. Next Step: After observing, I fly towards the highway." + }, + { + "type": "text", + "bbox": [ + 0.584, + 0.427, + 0.799, + 0.434 + ], + "angle": 0, + "content": "3. Subsequent Step: Once at the highway, I should turn left." + }, + { + "type": "text", + "bbox": [ + 0.584, + 0.435, + 0.845, + 0.442 + ], + "angle": 0, + "content": "4. Final Step: Finally, I should land on the roof of the building on the left" + }, + { + "type": "text", + "bbox": [ + 0.584, + 0.443, + 0.668, + 0.451 + ], + "angle": 0, + "content": "From the video frames:" + }, + { + "type": "text", + "bbox": [ + 0.584, + 0.453, + 0.854, + 0.46 + ], + "angle": 0, + "content": "- The initial frames show buildings and a square area, indicating the phase." + }, + { + "type": "text", + "bbox": [ + 0.584, + 0.461, + 0.884, + 0.468 + ], + "angle": 0, + "content": "- The subsequent frames show a transition from the square area to a broader view." + }, + { + "type": "text", + "bbox": [ + 0.584, + 0.47, + 0.862, + 0.476 + ], + "angle": 0, + "content": "- The later frames depict a clear view of the road and surrounding buildings." + }, + { + "type": "image", + "bbox": [ + 0.585, + 0.479, + 0.602, + 0.494 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.604, + 0.485, + 0.725, + 0.492 + ], + "angle": 0, + "content": "Answer: B. I fly towards the road." + }, + { + "type": "image", + "bbox": [ + 0.736, + 0.48, + 0.754, + 0.494 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.507, + 0.914, + 0.536 + ], + "angle": 0, + "content": "Figure 1: Embodied spatial reasoning: tasks and thinking process. Challenging tasks from public embodied video datasets are identified, encompassing both indoor and outdoor scenarios. We introduce slow-thinking to improve reasoning performance." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.542, + 0.158, + 0.555 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.559, + 0.486, + 0.81 + ], + "angle": 0, + "content": "Humans can perceive and reason about spatial relationships from sequential visual observations, such as egocentric video streams. However, how pretrained models acquire such abilities, especially high-level reasoning, remains unclear. This paper introduces Embodied-R, a collaborative framework combining large-scale Vision-Language Models (VLMs) for perception and small-scale Language Models (LMs) for reasoning. 
Using Reinforcement Learning (RL) with a novel reward system considering think-answer logical consistency, the model achieves slow-thinking capabilities with limited computational resources. After training on only 5k embodied video samples, Embodied-R with a 3B LM matches state-of-the-art multimodal reasoning models (OpenAI-o1, Gemini-2.5-pro) on both in-distribution and out-of-distribution embodied spatial reasoning tasks. Embodied-R also exhibits emergent thinking patterns such as systematic analysis and contextual integration. We further explore research questions including response length, training on VLM, strategies for reward design, and differences in model generalization after SFT (Supervised Fine-Tuning) and RL training." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.836, + 0.219, + 0.85 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.854, + 0.482, + 0.897 + ], + "angle": 0, + "content": "On the path toward Artificial General Intelligence (AGI) [17], we hope that pre-trained foundation models can not only perform tasks such as dialogue and image understanding in the cyber world [2, 44]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.542, + 0.916, + 0.653 + ], + "angle": 0, + "content": "but also develop human-like embodied spatial cognition in the three-dimensional physical world, enabling them to perceive, think, and move [4, 32]. The fundamental way humans achieve spatial cognition is through continuous, dynamic visual observations, akin to video streams [26, 30]. For example, by observing their surroundings, humans can infer their position relative to nearby objects. Similarly, based on historical visual observations, humans can determine the actions they should take to reach a target destination." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.654, + 0.915, + 0.861 + ], + "angle": 0, + "content": "Visual spatial cognition can be divided into two levels: perception and reasoning [51]. Perception refers to \"what is seen\", characterized by direct, low-level tasks such as object recognition, edge detection, or color differentiation [52]. Reasoning, on the other hand, involves \"what is understood\" and \"what actions to take\", which are indirect and higher-level tasks requiring logical inference and knowledge integration [62]. Examples of reasoning include \"Where did I come from?\" (e.g., recalling historical movement trajectories [36]), \"Where am I?\" (e.g., inferring the spatial relationships between nearby objects and distances [5]), and \"Where do I want to go?\" (e.g., planning actions and deciding movements to reach a destination [8]). While most existing research focuses on improving the perception capabilities of foundation models [6, 11], with notable progress, their spatial reasoning abilities remain limited [9, 58], and methods for enhancement are largely unexplored." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.861, + 0.915, + 0.888 + ], + "angle": 0, + "content": "Specifically, video-based spatial reasoning poses several challenges, as follows:" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.084, + 0.107, + 0.482, + 0.176 + ], + "angle": 0, + "content": "- Reasoning is always built upon perception [19, 32]. For the studied problem, continuous visual observations impose higher demands on perception. Reasoning cannot be well achieved with faulty perceptions or hallucinations [53]. It is challenging to reason when it is already hard to perceive from the videos." 
+ }, + { + "type": "text", + "bbox": [ + 0.084, + 0.177, + 0.483, + 0.315 + ], + "angle": 0, + "content": "- Video data naturally involves complex spatio-temporal relationships, requiring the discovery of object associations across frames and the extraction of semantics relevant to the reasoning task [16]. For instance, to navigate to a destination outside the current field of view, one must infer their location from historical visual observations, build a mental map of the environment, develop a high-level plan to determine the direction, and finally decide on specific actions to execute. Existing supervised fine-tuning (SFT) training methods lack supervision for the reasoning process, making it difficult to handle such reasoning tasks [62]." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.316, + 0.482, + 0.55 + ], + "angle": 0, + "content": "- Embodied visual observations have distinct characteristics. First, understanding disembodied videos, such as movies or TV shows, primarily emphasizes the content within the video, often from a broad and objective perspective [27]. In contrast, egocentric videos focus on understanding the relationship between the observer and the surrounding environment, often from a constrained first-person perspective [22]. Second, embodied continuous visual observations are generated over time, indicating that embodied perception should rely on sequential inputs rather than aggregating all visual observations for a single input after a prolonged period [31]. Finally, due to the continuity of motion in the physical world, egocentric visual observations also exhibit spatial continuity, meaning there is significant redundancy and repetition between frames. Consequently, directly applying existing multimodal large language models (MLLMs) to embodied videos leads to issues, including loss of generalization and input token limits caused by excessive redundant frames [1, 29]." + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.107, + 0.483, + 0.55 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.564, + 0.483, + 0.688 + ], + "angle": 0, + "content": "Recently, the impressive performance of OpenAI's o1/o3 [38] and DeepSeek-R1 [24] in solving complex reasoning problems(e.g., mathematics, coding, science, etc.) has drawn attention to reinforcement learning (RL) techniques. By incorporating the chain-of-thought (CoT) reasoning process into post-training, large language models (LLMs) demonstrate a \"slow-thinking\" mode, where they reason thoroughly before generating responses [45, 55]. Inspired by this, we attempt to introduce \"slow thinking\" into embodied video-based spatial reasoning tasks, as shown in Figure 1." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.689, + 0.483, + 0.84 + ], + "angle": 0, + "content": "This brings a new challenge: the trade-off between model size and computational cost. Existing studies suggest a strong correlation between multimodal understanding/perception capabilities and model size [7, 20, 56]. Since reasoning builds on perception, larger vision-language foundation models should be used as the starting point for training. However, increasing model size leads to often unacceptable computational costs. Additionally, video inputs map to long token sequences, further raising computational demands. Is there a way to leverage the perception capabilities of large-scale models while developing embodied reasoning abilities at a lower computational cost?" 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.841, + 0.483, + 0.897 + ], + "angle": 0, + "content": "Inspired by neuroscience [64], spatial perception and reasoning involve distinct brain regions: visual perception occurs in the visual areas of the occipital lobe [13], basic spatial understanding in the parietal lobe [18], and complex spatial reasoning in the prefrontal" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.107, + 0.915, + 0.356 + ], + "angle": 0, + "content": "cortex [14]. This inspired the design of a collaborative framework with two main components: a large-scale vision-language model (VLM) for perception and a small-scale language model (LM) for reasoning. Based on the continuity of observations, we first propose a key-frame extractor to retain critical information while reducing computational costs. Using a VLM, we sequentially extract semantic information from the frames, which simulates real-world online reasoning while effectively managing the input token length of VLMs for long video inputs. Finally, the semantic information and reasoning question are fed into the small-scale language model, which outputs the reasoning process and final answers. The small-scale language model is trained with RL, where the reward modeling not only incorporates rule-based rewards inspired by Deepseek-R1-Zero [24] but, more importantly, introduces a novel reward for the logical consistency of the reasoning process. In the experiments, we explore seven research questions, covering the framework's performance, RL's role in activating embodied spatial reasoning, and out-of-distribution generalization capabilities." + }, + { + "type": "text", + "bbox": [ + 0.53, + 0.357, + 0.91, + 0.371 + ], + "angle": 0, + "content": "In general, the main contributions of this paper are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.384, + 0.912, + 0.466 + ], + "angle": 0, + "content": "- We propose a collaborative framework for large-scale and small-scale foundation models to address spatial reasoning in the video modality. By decoupling perception and reasoning, the framework leverages the perceptual strength of large-scale foundation models while efficiently enhancing the reasoning capabilities of smaller models in a computationally resource-friendly manner." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.468, + 0.912, + 0.535 + ], + "angle": 0, + "content": "- This is the first work to employ reinforcement learning (RL) to enhance the embodied spatial reasoning abilities of foundation models. Specifically, we introduce a novel logical consistency reward, which improves the alignment between reasoning processes and generated answers." + }, + { + "type": "text", + "bbox": [ + 0.516, + 0.536, + 0.915, + 0.619 + ], + "angle": 0, + "content": "- Our proposed Embodied-R achieves performance comparable to state-of-the-art multimodal large language models (e.g., OpenAI-o1/Gemini-2.5-Pro) on both in-distribution and out-of-distribution benchmarks. We further investigate research questions including the generalization comparison between models trained by SFT & RL, reward design strategies, etc." + }, + { + "type": "list", + "bbox": [ + 0.516, + 0.384, + 0.915, + 0.619 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.643, + 0.66, + 0.657 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.661, + 0.915, + 0.897 + ], + "angle": 0, + "content": "Large Language Model Reasoning. 
Recently, enhancing reasoning capabilities has become a key focus in large model technologies, demonstrating remarkable performance on tasks such as mathematical and logical problem-solving [25, 47, 57]. Following the release of OpenAI's o1 [38], numerous studies have proposed various technical approaches to achieve similar functionalities, including Chain-of-Thought (CoT) [54], Monte Carlo Tree Search (MCTS) [23, 60], distillation [35], and rejection sampling combined with supervised fine-tuning (SFT) or Direct Preference Optimization (DPO) [40], among others. Furthermore, DeepSeek-R1 [24] introduced a method to foster the emergence of reasoning abilities in large language models (LLMs) through rule-based rewards combined with reinforcement learning. Similarly, Kimi k1.5 [45] proposed a comparable approach, presenting various training techniques, such as curriculum learning. This reinforcement learning paradigm has sparked significant interest, with subsequent works successfully reproducing related results [55, 59]."
  }
 ],
 [
  { "type": "text", "bbox": [0.082, 0.107, 0.482, 0.3], "angle": 0, "content": "Embodied Spatial Reasoning with VLMs. Inspired by the generality of foundation models across various domains [2, 3], embodied intelligence aims to develop agents that utilize large multimodal models as their \"brains\" to achieve perception, navigation, and manipulation in the 3D physical world [15, 41]. In terms of input, human visual-spatial perception is more akin to continuous RGB observations, similar to video streams [12, 42], rather than static images [48] or point clouds [52]. Several embodied video benchmarks [58] demonstrate that, while perception tasks are relatively well-addressed, spatial reasoning tasks (such as spatial relationship inference, navigation, and planning) remain highly challenging. However, existing research [16, 43] on video reasoning primarily focuses on disembodied content reasoning, with little emphasis on scenarios involving embodied continuous visual inputs." },
  { "type": "text", "bbox": [0.082, 0.301, 0.482, 0.468], "angle": 0, "content": "Collaboration between large and small models. Existing research primarily focuses on addressing the resource consumption and privacy risks associated with large models, as well as the efficiency and performance advantages of small models in specific scenarios [50]. Small models can assist large models in data selection, prompt optimization, and reasoning enhancement [28, 61]. The use of small models to detect hallucinations and privacy leakage is explored in [49, 63], improving overall system reliability. While our work shares the goal of reducing computational resource demands, it differs by emphasizing the complementary roles of large-scale VLMs in perception and small-scale LMs in enhancing embodied spatial reasoning." },
  { "type": "title", "bbox": [0.084, 0.481, 0.32, 0.495], "angle": 0, "content": "3 The Embodied-R Method" },
  { "type": "text", "bbox": [0.082, 0.499, 0.483, 0.556], "angle": 0, "content": "We first define the problem of embodied spatial reasoning. Subsequently, we introduce the VLM-based perception module and the LM-based reasoning module. The collaborative framework is shown in Figure 2."
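Before the formal definitions, the division of labor just described can be summarized in a short sketch. The helper names (`extract_keyframes`, `describe_pair`, `reason`) are assumptions for illustration, not the paper's actual interface; an overlap-based `extract_keyframes` is sketched at the end of Section 3.2.

```python
from typing import Any, List

def extract_keyframes(frames: List[Any]) -> List[Any]:
    """Placeholder; an overlap-based version appears at the end of Section 3.2."""
    return frames[:: max(1, len(frames) // 8)]  # naive uniform fallback

def describe_pair(prev_frame: Any, frame: Any, question: str) -> str:
    """Placeholder for the large-scale VLM perception call (Section 3.2.2)."""
    raise NotImplementedError

def reason(question: str, semantics: List[str]) -> str:
    """Placeholder for the small-scale, RL-trained LM (Section 3.3)."""
    raise NotImplementedError

def embodied_r(frames: List[Any], question: str) -> str:
    """Collaborative flow: key-frames -> sequential VLM semantics -> LM reasoning."""
    semantics: List[str] = []
    prev = None
    for frame in extract_keyframes(frames):  # Section 3.2.1
        # The VLM describes only the change between consecutive key-frames,
        # mimicking online perception over a continuous observation stream.
        semantics.append(describe_pair(prev, frame, question))
        prev = frame
    return reason(question, semantics)  # reasoning process plus final answer
```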
+ }, + { + "type": "title", + "bbox": [ + 0.084, + 0.568, + 0.305, + 0.582 + ], + "angle": 0, + "content": "3.1 Problem Formulation" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.587, + 0.482, + 0.684 + ], + "angle": 0, + "content": "In the physical world, an agent moves through space, generating a sequence of video frames (continuous visual observations) \\(\\mathbf{f} = [f_0, f_1, \\dots, f_T]\\). Suppose a spatial reasoning problem is denoted as \\(q\\). Our goal is to build a model that takes \\(q\\) and \\(\\mathbf{f}\\) as inputs and outputs an answer \\(a\\). The answer \\(a\\) is considered correct if it is semantically consistent with the ground truth \\(g\\); otherwise, it is deemed incorrect." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.698, + 0.408, + 0.714 + ], + "angle": 0, + "content": "3.2 Large-Scale VLM-based Perception" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.717, + 0.482, + 0.854 + ], + "angle": 0, + "content": "3.2.1 Key-Frame Extractor. As the agent moves continuously in space, high sampling frequencies result in significant overlap between consecutive frames. On one hand, the VLM relies on changes in the static objects within the environment across frames to infer the agent's pose variation. On the other hand, excessive overlap between frames leads to increased inference costs for both the VLM and LLM. To address this, we designed a key-frame extractor tailored to the characteristics of embodied videos, selecting key frames that retain overlap while ensuring sufficient information gain between them." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.855, + 0.482, + 0.897 + ], + "angle": 0, + "content": "The extraction of key-frames is based on the overlap of visual fields caused by motion continuity. When the agent moves forward, the visual content in the latter frame is expected to overlap with a" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.107, + 0.914, + 0.205 + ], + "angle": 0, + "content": "portion of the former frame, and the reverse is true when moving backward. Similarly, during left or right rotations, the latter frame should partially overlap with the former frame in the horizontal direction, and during upward or downward rotations, the overlap occurs in the vertical direction. Given that the sampling frequency of visual observations is typically much higher than the agent's motion speed, frames generally exhibit significant overlap." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.205, + 0.915, + 0.481 + ], + "angle": 0, + "content": "Specifically, a perspective transformation is used to model the geometric relationship between frames. Assuming \\( f_{t} \\) is a key-frame, to determine whether \\( f_{t+1} \\) should also be considered a keyframe, keypoints and descriptors are calculated from \\( f_{t} \\) and \\( f_{t+1} \\) using the Oriented FAST and Rotated BRIEF (ORB) algorithm. Next, a feature matching algorithm, such as the Brute-Force Matcher, is applied to match the descriptors between the two frames and the Random Sample Consensus (RANSAC) algorithm is employed to estimate the homography matrix. The overlap ratio between two frames is then computed. If overlap ratio is less than a predefined threshold, it indicates significant visual changes between the frames, and \\( f_{t+1} \\) is marked as a key-frame. Otherwise, the algorithm proceeds to calculate the overlap ratio between \\( f_{t} \\) and \\( f_{t+2} \\). 
This process continues until a new key-frame is identified, which then becomes the reference for subsequent frames. Considering the effect of viewpoint changes, rotations (both horizontal and vertical) result in larger field-of-view variations, leading to more frames being recorded during these movements. If the extracted key-frames are denoted as \\( \\mathbf{f}' = [f_{k_0}, f_{k_1}, \\dots, f_{k_n}] \\), the key-frame extraction process can be summarized as follows (a code sketch appears at the end of this section):"
  },
  { "type": "equation", "bbox": [0.651, 0.489, 0.914, 0.505], "angle": 0, "content": "\\[\n\\mathbf{f}' = \\text{K-Extractor}(\\mathbf{f}). \\tag{1}\n\\]" },
  { "type": "text", "bbox": [0.513, 0.513, 0.915, 0.692], "angle": 0, "content": "3.2.2 Embodied Semantic Representation. Since perceptual capability is positively correlated with model size [27, 58, 62], we employ a large-scale VLM to process visual inputs to ensure high-quality perception. The differential information of each key-frame is described sequentially. This approach provides two key benefits: 1) The sequential and dynamic processing aligns better with the characteristics of embodied scenarios, where visual observations are continuously generated over time. At each moment, the model should integrate historical semantic representations with the latest visual observations, rapidly updating the semantic understanding of spatial perception. 2) It facilitates the handling of long videos by avoiding the input token limitations that arise when all frames are processed simultaneously by the VLM." },
  { "type": "text", "bbox": [0.513, 0.693, 0.914, 0.75], "angle": 0, "content": "Specifically, for the first frame, the VLM identifies the objects present in the scene, their attributes, and their spatial locations. For subsequent frames, both the previous frame and the current frame are input into the VLM to extract the key semantic representation \\( s_{k_j} \\):" },
  { "type": "equation", "bbox": [0.601, 0.757, 0.915, 0.775], "angle": 0, "content": "\\[\ns_{k_j} \\sim \\psi_{\\theta}(s \\mid f_{k_{j-1}}, f_{k_j}; q), \\quad j = 1, 2, \\dots, n, \\tag{2}\n\\]" },
  { "type": "text", "bbox": [0.514, 0.781, 0.72, 0.796], "angle": 0, "content": "where \\( s_{k_j} \\) consists of three items:" },
  { "type": "text", "bbox": [0.515, 0.799, 0.913, 0.826], "angle": 0, "content": "- Action: Inferring the agent's actions based on the changes in visual observations between consecutive frames." },
  { "type": "text", "bbox": [0.515, 0.827, 0.914, 0.868], "angle": 0, "content": "- \\(\\Delta\\) Information: Determining changes in the spatial relationships between the agent and known objects, as well as identifying whether new objects appear in the field of view." },
  { "type": "text", "bbox": [0.515, 0.869, 0.913, 0.896], "angle": 0, "content": "- \\( q \\)-related content: Detecting whether objects or information relevant to the reasoning task appear in the latest field of view."
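As referenced above, here is a minimal sketch of the key-frame extractor (Eq. 1) built from standard OpenCV primitives. The ORB settings, the minimum match count, and the overlap threshold are illustrative assumptions rather than the paper's reported values, and degenerate or non-convex warped quadrilaterals are simply treated as low overlap.

```python
import cv2
import numpy as np

def overlap_ratio(frame_a: np.ndarray, frame_b: np.ndarray) -> float:
    """Approximate field-of-view overlap of frame_a inside frame_b using ORB
    keypoints, Brute-Force matching, and a RANSAC-estimated homography."""
    orb = cv2.ORB_create()
    kp_a, des_a = orb.detectAndCompute(frame_a, None)
    kp_b, des_b = orb.detectAndCompute(frame_b, None)
    if des_a is None or des_b is None:
        return 0.0
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = matcher.match(des_a, des_b)
    if len(matches) < 4:  # a homography needs at least 4 correspondences
        return 0.0
    src = np.float32([kp_a[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    dst = np.float32([kp_b[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    H, _ = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
    if H is None:
        return 0.0
    h, w = frame_a.shape[:2]
    corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]]).reshape(-1, 1, 2)
    warped = cv2.perspectiveTransform(corners, H)  # frame_a's footprint in frame_b
    target = np.float32([[0, 0], [w, 0], [w, h], [0, h]])
    # intersectConvexConvex assumes convex polygons; a wildly distorted
    # quadrilateral just yields a small (or zero) intersection area here.
    inter_area, _ = cv2.intersectConvexConvex(warped.reshape(-1, 2), target)
    return float(inter_area) / float(w * h)

def extract_keyframes(frames: list, threshold: float = 0.6) -> list:
    """Eq. (1): a frame becomes a new key-frame once its overlap with the
    last key-frame falls below `threshold` (the value is an assumption)."""
    if not frames:
        return []
    keyframes = [frames[0]]
    for frame in frames[1:]:
        if overlap_ratio(keyframes[-1], frame) < threshold:
            keyframes.append(frame)
    return keyframes
```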
+ },
  { "type": "list", "bbox": [0.515, 0.799, 0.914, 0.896], "angle": 0, "content": null }
 ],
 [
  { "type": "image", "bbox": [0.086, 0.104, 0.918, 0.449], "angle": 0, "content": null },
  { "type": "image_caption", "bbox": [0.082, 0.462, 0.916, 0.52], "angle": 0, "content": "Figure 2: The proposed Embodied-R is a collaborative embodied spatial reasoning framework integrating a Vision-Language Model (VLM) and a Language Model (LM). The separation of perception and reasoning enables us to leverage the perceptual capabilities of large-scale VLMs while training a resource-efficient small-scale LM to activate embodied reasoning through RL. Notably, we introduce a novel logical consistency reward to guide the LM in producing logically coherent reasoning and answer." },
  { "type": "text", "bbox": [0.084, 0.539, 0.483, 0.569], "angle": 0, "content": "In this way, we can extract spatial semantic representations \\(\\mathbf{s} = [s_{k_0}, s_{k_1}, \\dots, s_{k_n}]\\) from the key-frames \\(\\mathbf{f}'\\)." },
  { "type": "title", "bbox": [0.084, 0.593, 0.395, 0.609], "angle": 0, "content": "3.3 Small-Scale LM-based Reasoning" },
  { "type": "text", "bbox": [0.082, 0.611, 0.483, 0.667], "angle": 0, "content": "Given semantic perception, we can train a training-friendly small-scale language model capable of performing embodied spatial reasoning. Assuming the small-scale LM is denoted as \\(\\pi_{\\theta}\\), the response \\(o\\) inferred from the model can be expressed as: \\(o \\sim \\pi_{\\theta}(o \\mid q, s)\\)." },
  { "type": "text", "bbox": [0.082, 0.667, 0.483, 0.791], "angle": 0, "content": "Our training objective is to ensure that the model adheres to the \"think-then-answer\" paradigm, where the thinking process is logical and the answer is correct. We follow DeepSeek-R1-Zero and adopt a computationally efficient RL training strategy, Group Relative Policy Optimization (GRPO). Besides rule-based format and accuracy rewards, we propose a novel reasoning process reward tailored for embodied reasoning tasks to mitigate reward hacking and enhance the logical consistency between the reasoning process and the final answer." },
  { "type": "text", "bbox": [0.083, 0.813, 0.483, 0.897], "angle": 0, "content": "3.3.1 Group Relative Policy Optimization. For a given query \\( q \\) and semantic annotation \\( s \\), GRPO generates a group of outputs \\( \\{o_1, o_2, \\dots, o_G\\} \\) using the old policy \\( \\pi_{\\mathrm{old}} \\); the reference policy \\( \\pi_{\\mathrm{ref}} \\) refers to the original model not trained via GRPO. The policy model \\( \\pi_\\theta \\) is then updated by optimizing the following objective:" },
  { "type": "equation", "bbox": [0.531, 0.573, 0.914, 0.642], "angle": 0, "content": "\\[\n\\mathcal{J}(\\theta) = \\mathbb{E}_{(q, \\mathbf{s}) \\sim \\mathbb{D},\\; \\{o_i\\}_{i=1}^{G} \\sim \\pi_{\\mathrm{old}}(o \\mid q, \\mathbf{s})} \\left[ \\frac{1}{G} \\sum_{i=1}^{G} \\left( \\min\\!\\left( \\frac{\\pi_{\\theta}(o_i \\mid q, \\mathbf{s})}{\\pi_{\\mathrm{old}}(o_i \\mid q, \\mathbf{s})} A_i,\\; \\operatorname{clip}\\!\\left( \\frac{\\pi_{\\theta}(o_i \\mid q, \\mathbf{s})}{\\pi_{\\mathrm{old}}(o_i \\mid q, \\mathbf{s})},\\, 1 - \\epsilon,\\, 1 + \\epsilon \\right) A_i \\right) - \\beta \\mathcal{D}_{\\mathrm{KL}}\\big(\\pi_{\\theta} \\,\\|\\, \\pi_{\\mathrm{ref}}\\big) \\right) \\right] \\tag{3}\n\\]" },
  { "type": "text", "bbox": [0.514, 0.662, 0.916, 0.73], "angle": 0, "content": "where \\(\\epsilon\\) and \\(\\beta\\) are hyperparameters, and \\(\\mathcal{D}_{\\mathrm{KL}}(\\pi_{\\theta} \\| \\pi_{\\mathrm{ref}})\\) is the KL divergence penalty, estimated as \\(\\mathcal{D}_{\\mathrm{KL}}(\\pi_{\\theta} \\| \\pi_{\\mathrm{ref}}) = \\frac{\\pi_{\\mathrm{ref}}(o_i \\mid q, \\mathbf{s})}{\\pi_{\\theta}(o_i \\mid q, \\mathbf{s})} - \\log \\frac{\\pi_{\\mathrm{ref}}(o_i \\mid q, \\mathbf{s})}{\\pi_{\\theta}(o_i \\mid q, \\mathbf{s})} - 1\\). \\(A_{i}\\) represents the advantage corresponding to the output \\(o_i\\), calculated from the group rewards \\(\\{r_1, r_2, \\dots, r_G\\}\\): \\(A_i = \\frac{r_i - \\mathrm{mean}(\\{r_1, r_2, \\dots, r_G\\})}{\\mathrm{std}(\\{r_1, r_2, \\dots, r_G\\})}\\)." },
  { "type": "text", "bbox": [0.514, 0.744, 0.916, 0.84], "angle": 0, "content": "3.3.2 Reward Modeling. Reward modeling is a critical component of RL algorithms, as its design guides the direction of model optimization. We propose three types of rewards: a format reward, an accuracy reward, and a logical consistency reward. These are designed to respectively guide the model to learn the \"think-answer\" reasoning pattern, to perform accurate embodied spatial reasoning, and to maintain logical consistency between the reasoning and the answer." },
  { "type": "text", "bbox": [0.514, 0.841, 0.915, 0.897], "angle": 0, "content": "Format Reward: We aim for the model to output \\( o_i \\) by first producing an embodied reasoning process \\( p_i \\) followed by the final answer \\( a_i \\). The reasoning process and answer are enclosed within <think></think>
and <answer></answer> tags, respectively:"
  }
 ],
 [
  { "type": "text", "bbox": [0.107, 0.113, 0.465, 0.266], "angle": 0, "content": "Please assume the role of an agent. Given a question and a series of frames, you should first think about the reasoning process in the mind and then provide the final answer. The reasoning process and answer are enclosed within <think></think> and <answer></answer> tags, respectively, i.e., <think> reasoning process here </think> <answer> answer here </answer>. Ensure that your answer is consistent with and directly derived from your thinking process, maintaining logical coherence between the two sections. The frames represent your egocentric observations from the past to the present. Question: q. Video: f'. Assistant:" },
  { "type": "text", "bbox": [0.084, 0.281, 0.483, 0.311], "angle": 0, "content": "A regular expression is applied to evaluate whether \\( o_i \\) meets the specified requirements, thereby generating the format reward \\( r_i' \\):" },
  { "type": "equation", "bbox": [0.184, 0.317, 0.483, 0.354], "angle": 0, "content": "\\[\nr_i' = \\left\\{ \\begin{array}{ll} 1, & \\text{if format is correct;} \\\\ 0, & \\text{if format is incorrect.} \\end{array} \\right. \\tag{4}\n\\]" },
  { "type": "text", "bbox": [0.083, 0.358, 0.485, 0.428], "angle": 0, "content": "Accuracy Reward: The accuracy reward \\( r_i'' \\) assesses whether the answer \\( a_i \\) is semantically consistent with the ground truth \\( g \\). For example, multiple-choice questions typically have precise and unique answers, which can be easily extracted when the response adheres to the specified format." },
  { "type": "equation", "bbox": [0.225, 0.433, 0.483, 0.47], "angle": 0, "content": "\\[\nr_i'' = \\left\\{ \\begin{array}{ll} 1, & a_i = g; \\\\ 0, & a_i \\neq g. \\end{array} \\right. \\tag{5}\n\\]" },
  { "type": "text", "bbox": [0.082, 0.476, 0.483, 0.67], "angle": 0, "content": "Logical Consistency Reward: When using only the format reward and accuracy reward, we consistently observed hacking behaviors. Specifically, for spatial reasoning tasks where the possible answers are limited (e.g., the relative position of an object with respect to the agent's body), cases arise where an incorrect reasoning process \\( p_i \\) leads to a correct answer \\( a_i \\), which is mistakenly assigned a positive reward. As such cases accumulate, the logical consistency of the model's responses deteriorates. To address this issue, we introduce a simple yet effective process reward. Our goal is to ensure a lower bound on logical consistency, such that the reasoning ability of \\( \\pi_{\\theta} \\) should not degrade below that of the reference model \\( \\pi_{\\mathrm{ref}} \\). Therefore, when the model's answer is correct \\( (a_i = g) \\), we input the question \\( q \\) and reasoning process \\( p_i \\) into the reference model without providing video frames, yielding an answer:" },
  { "type": "equation", "bbox": [0.225, 0.676, 0.482, 0.693], "angle": 0, "content": "\\[\na_i' \\sim \\pi_{\\mathrm{ref}}(a \\mid q, p_i). \\tag{6}\n\\]" },
  { "type": "text", "bbox": [0.083, 0.698, 0.483, 0.741], "angle": 0, "content": "If \\( a_i' \\) is consistent with \\( a_i \\), it indicates that the reasoning process can logically lead to the answer; otherwise, it reflects a logical inconsistency between the reasoning process and the answer." },
  { "type": "equation", "bbox": [0.209, 0.747, 0.483, 0.782], "angle": 0, "content": "\\[\nr_i''' = \\left\\{ \\begin{array}{ll} 1, & a_i = a_i' = g; \\\\ 0, & \\text{else.} \\end{array} \\right. \\tag{7}\n\\]" },
  { "type": "text", "bbox": [0.084, 0.788, 0.483, 0.815], "angle": 0, "content": "Total Reward: The total reward is a linear combination of the three rewards mentioned above:" },
  { "type": "equation", "bbox": [0.202, 0.822, 0.483, 0.84], "angle": 0, "content": "\\[\nr_i = \\omega_1 r_i' + \\omega_2 r_i'' + \\omega_3 r_i'''. \\tag{8}\n\\]" },
  { "type": "title", "bbox": [0.084, 0.85, 0.22, 0.865], "angle": 0, "content": "4 Experiments" },
  { "type": "text", "bbox": [0.083, 0.868, 0.484, 0.897], "angle": 0, "content": "We first provide the details of the experimental setup and then demonstrate the following: quantitative results, qualitative results," },
  { "type": "text", "bbox": [0.514, 0.107, 0.914, 0.136], "angle": 0, "content": "and ablation studies. These correspond to addressing the following three research questions (RQs):" },
  { "type": "text", "bbox": [0.515, 0.138, 0.914, 0.164], "angle": 0, "content": "- RQ1: How does Embodied-R perform compared to existing video-LLMs?" },
  { "type": "text", "bbox": [0.515, 0.165, 0.843, 0.179], "angle": 0, "content": "- RQ2: Has Embodied-R learned slow-thinking?" },
  { "type": "text", "bbox": [0.515, 0.18, 0.863, 0.193], "angle": 0, "content": "- RQ3: What are the contributions of each module?" },
  { "type": "list", "bbox": [0.515, 0.138, 0.914, 0.193], "angle": 0, "content": null },
  { "type": "title", "bbox": [0.515, 0.207, 0.722, 0.223], "angle": 0, "content": "4.1 Experimental Setup" },
  { "type": "text", "bbox": [0.513, 0.225, 0.915, 0.556], "angle": 0, "content": "4.1.1 Data Preparation. We primarily focus on spatial reasoning problems during motion within three-dimensional physical space to evaluate the effectiveness of our method. For this purpose, we selected two embodied video datasets as the main training and testing sets: VSI-Bench [58], which contains indoor first-person navigation data, and UrbanVideo-Bench [62], which consists of outdoor embodied data captured by drones navigating through aerial spaces. These datasets provide diversity in scenarios by incorporating both outdoor and indoor video data. Based on the content of the tasks, we specifically selected four distinct types of tasks from each dataset, characterized by long spatial reasoning chains and low accuracy. These tasks are formulated as multiple-choice question-answering problems, ensuring determinism in answers to facilitate RL training and allow direct calculation of accuracy to evaluate performance.
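As an aside before the data details: the reward design above (Eqs. 4, 5, and 8) and the GRPO pieces of Section 3.3.1 can be sketched compactly. The regex, the clipping constant, and the default weights (the Stage-3 ratio 1:7:2 from Section 4.1.3) are assumptions for illustration; the logical consistency term \( r_i''' \) is sketched separately alongside Section 5.3.

```python
import re
import numpy as np

THINK_ANSWER = re.compile(r"^<think>.+?</think>\s*<answer>.+?</answer>$", re.DOTALL)

def format_reward(output: str) -> float:
    """Eq. (4): 1 if the response matches the <think>/<answer> template (regex assumed)."""
    return 1.0 if THINK_ANSWER.match(output.strip()) else 0.0

def accuracy_reward(answer: str, ground_truth: str) -> float:
    """Eq. (5): exact match against the multiple-choice ground truth."""
    return 1.0 if answer.strip() == ground_truth.strip() else 0.0

def total_reward(r_fmt: float, r_acc: float, r_logic: float,
                 weights=(1.0, 7.0, 2.0)) -> float:
    """Eq. (8): linear combination of the three reward terms."""
    w1, w2, w3 = weights
    return w1 * r_fmt + w2 * r_acc + w3 * r_logic

def group_advantages(rewards) -> np.ndarray:
    """GRPO advantage: A_i = (r_i - mean) / std over one group (Section 3.3.1)."""
    r = np.asarray(rewards, dtype=np.float64)
    return (r - r.mean()) / (r.std() + 1e-8)  # small epsilon for numerical safety

def grpo_surrogate(logp_new, logp_old, advantages, kl_est, eps=0.2, beta=0.001):
    """Clipped surrogate of Eq. (3) for one sampled group (eps assumed; beta is
    the KL coefficient reported in the implementation details)."""
    ratio = np.exp(np.asarray(logp_new) - np.asarray(logp_old))
    a = np.asarray(advantages)
    unclipped = ratio * a
    clipped = np.clip(ratio, 1.0 - eps, 1.0 + eps) * a
    return float(np.mean(np.minimum(unclipped, clipped) - beta * np.asarray(kl_est)))
```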
Across eight task categories, the dataset covers multiple levels of spatial reasoning, comprising a total of 5,415 QA pairs and 1,492 videos. Additionally, we include two out-of-distribution datasets, EgoSchema [34] and the Egocentric task in MVBench [27]. EgoSchema is designed for task-level reasoning from a first-person perspective, with 500 QA pairs and 500 videos available in its fully open-source portion. MVBench encompasses the embodied task of egocentric navigation, comprising 200 QA pairs and 200 corresponding videos. These datasets serve to evaluate the generalization capability of the trained model." },
  { "type": "text", "bbox": [0.513, 0.558, 0.915, 0.806], "angle": 0, "content": "To ensure comprehensive evaluation, we conducted five repeated experiments. The dataset was randomly divided into five equal parts, and 5-fold cross-validation was adopted. The final testing results are averaged across the five experiments. Furthermore, we address the issue of potential semantic bias in the datasets. For instance, in action generation tasks, forward movement may inherently have a higher correctness rate than adjusting the gimbal angle, which is a characteristic of the task itself. To prevent the testing performance from being influenced by the model learning the textual distribution rather than truly understanding the spatial information in the video, we implement an additional filtering step for the testing set. Specifically, we train an LLM through supervised fine-tuning using only the textual QA pairs from the training set, without video inputs. If a question in the testing set can be correctly answered by the fine-tuned LLM but not by the original LLM, it indicates semantic bias in that QA pair. These biased QA pairs are excluded from the testing set as they fail to accurately assess the spatial reasoning capabilities of models." },
  { "type": "text", "bbox": [0.514, 0.813, 0.917, 0.897], "angle": 0, "content": "4.1.2 Implementation Details. We use Qwen2.5-3B-Instruct [57] as the small-scale LM and Qwen2.5-VL-72B-Instruct [6] as the large-scale VLM. Both training and inference were conducted using 8 NVIDIA A800-SXM4-40GB GPUs, with each RL training run requiring approximately 90 GPU hours. Other key hyperparameters for training are as follows: learning rate: 5e-7, temperature:" }
 ],
 [
  { "type": "table_caption", "bbox": [0.082, 0.104, 0.916, 0.149], "angle": 0, "content": "Table 1: Accuracy of Embodied-R and baselines on 8 indoor and outdoor embodied spatial reasoning tasks. The baselines include popular proprietary models, state-of-the-art (SOTA) multimodal reasoning models, open-sourced video large language models, and models fine-tuned on the same training dataset." },
  { "type": "table", "bbox": [0.085, 0.159, 0.599, 0.609], "angle": 0, "content": "
Method | Avg. | UrbanVideo-Bench | VSI-Bench
Landmark Position | Counterfactual | Progress Evaluation | Action Generation | Relative Distance | Relative Direction | Route Planning | Appearance Order
Random | 24.0 | 19.7 | 25.0 | 21.8 | 16.4 | 25.0 | 36.1 | 28.3 | 25.0
Proprietary Models (API)
Qwen-VL-Max [32f] | 34.1 | 44.8 | 49.2 | 38.8 | 29.6 | 28.0 | 33.3 | 29.6 | 28.3
GPT-4o [32f] | 35.7 | 36.8 | 44.7 | 34.2 | 33.8 | 37.0 | 41.3 | 31.5 | 28.5
Gemini-1.5-Flash [1fps] | 38.3 | 37.8 | 42.4 | 43.3 | 34.4 | 37.7 | 41.0 | 31.5 | 37.8
Gemini-1.5-Pro [1fps] | 39.7 | 37.4 | 46.2 | 38.8 | 31.9 | 51.3 | 46.3 | 36.0 | 34.6
SOTA Reasoning Models (API)
OpenAI-o1 [32f] | 37.2 | 34.6 | 53.3 | 39.1 | 28.0 | 39.7 | 35.8 | 52.9 | 39.8
Gemini-2.5-Pro [1fps] | 40.8 | 40.0 | 75.0 | 38.7 | 23.5 | 42.0 | 34.5 | 52.4 | 63.6
Open-source Models
LLaVA-NeXT-Video-7B-hf [32f] | 29.5 | 49.5 | 20.5 | 36.6 | 19.2 | 25.2 | 26.3 | 29.9 | 24.5
Phi-3.5-vision-instruct [32f] | 29.0 | 49.2 | 34.8 | 33.2 | 15.6 | 25.4 | 26.5 | 36.9 | 25.2
Kangaroo [64f] | 30.0 | 35.5 | 42.4 | 32.5 | 32.4 | 25.2 | 26.8 | 23.5 | 24.9
InternVL2-2B [32f] | 24.5 | 19.3 | 45.5 | 29.2 | 20.9 | 25.1 | 25.0 | 32.6 | 23.9
InternVL2-8B [32f] | 25.5 | 23.1 | 45.5 | 31.5 | 21.4 | 24.7 | 25.7 | 28.3 | 24.8
InternVL2-40B [32f] | 25.8 | 23.2 | 41.7 | 32.4 | 22.3 | 24.9 | 25.7 | 29.4 | 24.5
Qwen2.5-VL-3B-Instruct [1fps] | 33.1 | 32.1 | 47.8 | 34.0 | 31.0 | 27.9 | 32.6 | 39.0 | 38.9
Qwen2.5-VL-7B-Instruct [1fps] | 33.3 | 33.3 | 21.7 | 25.0 | 27.8 | 35.8 | 39.7 | 48.8 | 38.8
Qwen2.5-VL-72B-Instruct [1fps] | 34.9 | 34.7 | 34.8 | 26.4 | 37.7 | 40.8 | 29.0 | 32.5 | 43.9
Supervised Fine-Tuning
Qwen2.5-VL-3B-Instruct [1fps] | 41.7 | 47.7 | 33.4 | 34.8 | 39.2 | 42.6 | 42.3 | 41.2 | 43.9
Qwen2.5-VL-7B-Instruct [1fps] | 45.4 | 40.2 | 53.4 | 38.0 | 40.8 | 47.8 | 46.3 | 44.1 | 56.1
Proposed Embodied-R
VLM-72B + LLM-3B [≤32f] | 51.1 | 55.1 | 59.9 | 39.7 | 47.6 | 50.0 | 44.3 | 36.8 | 72.0
" + }, + { + "type": "image", + "bbox": [ + 0.606, + 0.158, + 0.92, + 0.348 + ], + "angle": 0, + "content": null + }, + { + "type": "table", + "bbox": [ + 0.618, + 0.361, + 0.9, + 0.417 + ], + "angle": 0, + "content": "
GPT-4o | Qwen2.5-VL-72B
OpenAI-o1 | Qwen2.5-VL-3B
Gemini-1.5-Pro | Qwen2.5-VL-3B-SFT
InternVL2-40B | Embodied-R
" + }, + { + "type": "table_caption", + "bbox": [ + 0.622, + 0.434, + 0.9, + 0.448 + ], + "angle": 0, + "content": "Table 2: Ablation of Key-Frame Extractor" + }, + { + "type": "table", + "bbox": [ + 0.612, + 0.451, + 0.909, + 0.512 + ], + "angle": 0, + "content": "
| Avg. Frame | Acc. | Training Time | Inference Time
w/o | 32 | 51.1 | 127.87 h | 243.68 s
w | 20.7 (↓11.3) | 49.5 (↓1.6) | 111.70 h (↓16.17) | 157.55 s (↓86.13)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.644, + 0.528, + 0.878, + 0.542 + ], + "angle": 0, + "content": "Table 3: Ablation of Collaboration." + }, + { + "type": "table", + "bbox": [ + 0.605, + 0.549, + 0.918, + 0.604 + ], + "angle": 0, + "content": "
| Avg. | LP | C | PE | AG | RDist | RDir | RP | AO
w/o | 34.8 | 31.8 | 45.7 | 28.3 | 28.1 | 41.0 | 29.7 | 37.5 | 46.0
w | 51.1 | 55.1 | 59.9 | 39.7 | 47.6 | 50.0 | 44.3 | 36.8 | 72.0
Δ | +16.3 | +23.3 | +14.2 | +11.4 | +19.5 | +9.0 | +14.6 | -0.7 | +26.0
" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.627, + 0.483, + 0.671 + ], + "angle": 0, + "content": "1.0, train batch size: 32, rollout size: 8, KL coefficient: 0.001, maximum response length: 2048, input length: 6144. When conducting inference on the test set, the temperature is set to 0.5." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.681, + 0.483, + 0.751 + ], + "angle": 0, + "content": "4.1.3 Three-Stage Training Schedule. As for the RL training on the LM, we design a three-stage training schedule to achieve a smooth improvement in training performance. The primary distinction between stages lies in the different weight ratios assigned to three types of rewards." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.758, + 0.483, + 0.841 + ], + "angle": 0, + "content": "- Stage 1: In epochs 1 and 2, the goal is to guide the model to follow the \"\" output format. At this stage, the weights are set as \\(\\omega_{1}:\\omega_{2}:\\omega_{3} = 7:3:0\\). Correct format rewards also assist in locating the answer and reduce misjudgment in accuracy. During this phase, the format reward rapidly converges to 1." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.841, + 0.482, + 0.896 + ], + "angle": 0, + "content": "- Stage 2: In epochs 3 and 4, the focus shifts to improving the accuracy of the model's responses, guiding the model to produce correct reasoning answers. The weights are set as \\(\\omega_{1}:\\omega_{2}:\\omega_{3} = 3:7:0\\)." + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.758, + 0.483, + 0.896 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.628, + 0.916, + 0.684 + ], + "angle": 0, + "content": "- Stage 3: In subsequent 5-12 epochs, the aim is to enhance accuracy while simultaneously improving the quality of the \"thinking\" process, ensuring logical consistency between thinking and the answer. The weights are set as \\(\\omega_{1}:\\omega_{2}:\\omega_{3} = 1:7:2\\)." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.707, + 0.91, + 0.74 + ], + "angle": 0, + "content": "4.2 How Does Embodied-R Perform Compared to Existing Video-LLMs?" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.743, + 0.915, + 0.799 + ], + "angle": 0, + "content": "To evaluate the effectiveness of the proposed method, in addition to the random baseline, we introduced four categories comprising 17 multimodal large language models capable of processing video inputs:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.813, + 0.916, + 0.854 + ], + "angle": 0, + "content": "- Proprietary Models: Cost-effective multimodal models with over 100B parameters, including Qwen-VL-Max [46], GPT-4o [37], Gemini-1.5-Flash [44], and Gemini-1.5-Pro [44]." + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.855, + 0.915, + 0.897 + ], + "angle": 0, + "content": "- SOTA Reasoning Models: State-of-the-art reasoning models with the highest performance but significant computational cost, including OpenAI-o1 [38] and Gemini-2.5-Pro [21]." 
+ }, + { + "type": "list", + "bbox": [ + 0.515, + 0.813, + 0.916, + 0.897 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "image", + "bbox": [ + 0.107, + 0.115, + 0.495, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.501, + 0.115, + 0.887, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.365, + 0.916, + 0.407 + ], + "angle": 0, + "content": "Figure 3: Case Analysis: Embodied-R has initially developed the ability for slow-thinking: it can think before answering, effectively distinguish spatial relationships, provide structured and organized responses, and integrate information across multiple frames for embodied scene analysis." + }, + { + "type": "image", + "bbox": [ + 0.108, + 0.419, + 0.456, + 0.577 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.083, + 0.585, + 0.483, + 0.613 + ], + "angle": 0, + "content": "Figure 4: Ablation of RL training and comparison to other language models." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.624, + 0.5, + 0.666 + ], + "angle": 0, + "content": "- Open-Source Models: Popular open-source multimodal models, including LLaVA-NeXT-Video-7B-hf [29], Phi-3.5-vision-instruct [1], the Internvl2 series [11], and the Qwen-VL series [6]." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.666, + 0.483, + 0.74 + ], + "angle": 0, + "content": "- Supervised Fine-Tuning (SFT): Considering the scarcity of embodied video tasks, the aforementioned models may lack exposure to relevant data. Therefore, Qwen2.5-VL-3B-Instruct [6] and Qwen2.5-VL-7B-Instruct [6] are fine-tuned for these tasks. The results presented in Table 1 lead to the following conclusions:" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.744, + 0.484, + 0.897 + ], + "angle": 0, + "content": "- After undergoing RL training on embodied reasoning tasks, our model significantly outperformed proprietary models as well as OpenAI-o1 and Gemini-2.5-Pro by over \\(10\\%\\). Moreover, it consistently demonstrated leading performance across various tasks. These results highlight the considerable difficulty of embodied reasoning tasks and indicate that current reasoning models lack generalization capability for such spatial reasoning challenges. On the other hand, the findings confirm that collaborative framework with RL can effectively enhance model reasoning performance in specific domains, especially for tasks that remain poorly solved." + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.624, + 0.5, + 0.897 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.422, + 0.916, + 0.532 + ], + "angle": 0, + "content": "- For embodied video reasoning, a highly coupled perception-reasoning problem, the VLM model Qwen2.5-VL-72B-Instruct achieved an accuracy of only \\(34.9\\%\\) through direct inference. In contrast, incorporating a small-scale LM model improved accuracy to \\(51.1\\%\\). Given limited computational resources for training, the collaborative framework proposed in this study provides an effective solution for balancing model size with hardware constraints." + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.533, + 0.918, + 0.603 + ], + "angle": 0, + "content": "- Under similar computational resource limitations, direct fine-tuning is restricted to models with a size of 7B or smaller. However, the perceptual capacity of small-scale VL models imposes a low upper bound on accuracy compared to Embodied-R. 
Additionally, fine-tuned models lack the capability for slow-thinking." + }, + { + "type": "list", + "bbox": [ + 0.515, + 0.422, + 0.918, + 0.603 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.627, + 0.9, + 0.643 + ], + "angle": 0, + "content": "4.3 Has Embodied-R Learned Slow-Thinking?" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.645, + 0.916, + 0.702 + ], + "angle": 0, + "content": "Beyond the quantitative results, we aim to explore whether spatial reasoning capabilities in the output of Embodied-R are improved. As illustrated in Figure 3, after RL training, Embodied-R demonstrates the following human-like reasoning ways:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.715, + 0.915, + 0.755 + ], + "angle": 0, + "content": "- Spatial Relationship Reasoning: Accurately inferring the relative spatial relationship between itself and the surrounding environment." + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.757, + 0.915, + 0.798 + ], + "angle": 0, + "content": "- Systematic Analysis: Breaking down problems into components, presenting answers with a \"part-to-whole\" structure, and maintaining clear logical organization." + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.799, + 0.922, + 0.826 + ], + "angle": 0, + "content": "- Contextual Integration: Integrating semantic information across different frames to perform comprehensive analysis." + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.827, + 0.915, + 0.854 + ], + "angle": 0, + "content": "- Think-Answer Format: Strictly adhering to a structured process of reasoning before outputting the final answer." + }, + { + "type": "list", + "bbox": [ + 0.515, + 0.715, + 0.922, + 0.854 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.868, + 0.916, + 0.897 + ], + "angle": 0, + "content": "In summary, Embodied-R demonstrates a certain degree of slow-thinking capability in embodied spatial reasoning." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.092, + 0.111, + 0.296, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.296, + 0.109, + 0.498, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.498, + 0.111, + 0.704, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.704, + 0.109, + 0.912, + 0.238 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.089, + 0.244, + 0.318, + 0.385 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.319, + 0.242, + 0.631, + 0.384 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.639, + 0.244, + 0.911, + 0.385 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.082, + 0.397, + 0.915, + 0.452 + ], + "angle": 0, + "content": "Figure 5: a-d. The GRPO training process (a: accuracy reward; b: format reward; c: ratio of logical consistency reward to accuracy reward; d: response length of validation set). e. Comparison of accuracy reward curves for RL training of equivalently sized LM and VLM models. f. Model performance before and after integrating logical consistency reward. g. Comparison of generalization performance between models trained with RL and SFT." 
+ }, + { + "type": "title", + "bbox": [ + 0.084, + 0.46, + 0.375, + 0.473 + ], + "angle": 0, + "content": "4.4 Contributions of Each Module" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.478, + 0.483, + 0.562 + ], + "angle": 0, + "content": "4.4.1 Ablation of Key-Frame Extractor. The role of Key-Frame Extractor is to reduce inference time and training time by retaining essential frames and removing redundant ones while maintaining perceptual quality. As shown in Table 2, with negligible differences in accuracy, training time is significantly reduced by \\(8.7\\%\\), and single inference time is reduced by approximately one-third." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.57, + 0.483, + 0.68 + ], + "angle": 0, + "content": "4.4.2 Ablation of Collaboration. The collaborative framework enables improved reasoning capabilities under limited computational resources for training. With training-free large-scale pretrained VLMs, it only requires training small-scale LM models to achieve enhanced reasoning performance. As shown in Table 3, with identical key-frame inputs and using the same VLM, Qwen2.5-VL-72B-Instruct, the overall accuracy of collaborative inference is 1.5 times higher than that of the standalone VLM." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.689, + 0.483, + 0.784 + ], + "angle": 0, + "content": "4.4.3 Ablation of RL Training. RL is central to the LM training in this paper. Without RL training, directly applying the original LM-3B model for reasoning leads to poor performance, as the LM has limited exposure to embodied spatial reasoning data during pretraining. After RL training, the LM achieves significant improvements, with a \\(27.9\\%\\) increase on the UrbanVideo-Bench and a \\(20.6\\%\\) increase on the VSI-Bench benchmarks." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.786, + 0.483, + 0.896 + ], + "angle": 0, + "content": "Given that VLM has already transformed visual inputs into textual representations, we introduced 4 text-based reasoning models (o3-mini [39], Deepseek-R1 [24], Qwen-Max [46], Qwen2.5-7B-Instruct [6]) as baselines to further assess the importance of reasoning capability in the embodied spatial task. The results demonstrate a clear positive correlation between the reasoning ability of the model and its accuracy. The strong performance of Embodied-R may not only stem from its familiarity with the data distribution" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.461, + 0.914, + 0.516 + ], + "angle": 0, + "content": "but also from its synergy with the representations provided by the VLM. Following training, the small-scale LM becomes more attuned to the VLM-generated representations, which translates into enhanced performance on embodied reasoning tasks." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.524, + 0.71, + 0.54 + ], + "angle": 0, + "content": "5 Further Exploration" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.543, + 0.913, + 0.571 + ], + "angle": 0, + "content": "Building upon the aforementioned experiments, we further explore four intriguing RQs related to embodied video-based RL training:" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.58, + 0.914, + 0.607 + ], + "angle": 0, + "content": "- RQ4: What Is the Relationship Between Inference Ability, Aha Moments, and Response Length?" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.608, + 0.911, + 0.621 + ], + "angle": 0, + "content": "- RQ5: Why Not Directly Perform RL Training on VLLMs?" 
+ }, + { + "type": "text", + "bbox": [ + 0.515, + 0.622, + 0.862, + 0.634 + ], + "angle": 0, + "content": "- RQ6: Is Accuracy+Format Rewards All You Need?" + }, + { + "type": "text", + "bbox": [ + 0.515, + 0.636, + 0.912, + 0.661 + ], + "angle": 0, + "content": "- RQ7: RL vs SFT when Generalizing to Out-of-Distribution (OOD) Embodied Tasks?" + }, + { + "type": "list", + "bbox": [ + 0.515, + 0.58, + 0.914, + 0.661 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.681, + 0.884, + 0.714 + ], + "angle": 0, + "content": "5.1 Relationship Between Inference Ability, Aha Moments, and Response Length?" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.716, + 0.915, + 0.896 + ], + "angle": 0, + "content": "The GRPO training process is illustrated in Figure 5a-d, which correspond to the validation set's accuracy reward, format reward, ratio of logical consistency reward to accuracy reward, and the response length, respectively. Notably, existing pure-text-based reproductions [55, 59] of DeepSeek-R1-Zero models identify inference ability and the \"aha moment\" as key indicators of emergent reasoning capabilities. However, such phenomena are rarely observed in other multimodal reasoning tasks, such as image-based reasoning [10, 33]. This leads us to hypothesize that response length is strongly influenced by the nature of the question itself. For instance, mathematical problems often require multi-step calculations, where increased reasoning length tends to correlate positively with reasoning ability. In contrast, for multimodal reasoning tasks like embodied spatial" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.082, + 0.107, + 0.482, + 0.178 + ], + "angle": 0, + "content": "reasoning, the LM model training process converges toward an optimal range of text output distributions. Concise reasoning patterns may facilitate embodied spatial reasoning. This highlights the versatility of the RL-based post-training method, demonstrating its ability to benefit a wide range of reasoning tasks." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.188, + 0.467, + 0.204 + ], + "angle": 0, + "content": "5.2 Why Not Directly Perform RL on VLLMs?" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.207, + 0.483, + 0.332 + ], + "angle": 0, + "content": "We previously attempted direct RL training on the Qwen-VL-3B-Instruct model. As shown in Figure 5e, under similar training parameters and time, the performance of the VLM was notably inferior to that of the LM. Upon convergence, the VLM achieved an accuracy of \\(43.8\\%\\) on the test set, significantly lower than that of the LM. The limited perceptual capability of the VLM restricts its potential for reasoning improvements. Therefore, under resource-constrained conditions, collaborative inference integrating models of different scales presents a promising solution." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.344, + 0.482, + 0.359 + ], + "angle": 0, + "content": "5.3 Is Accuracy+Format Rewards All You Need?" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.363, + 0.483, + 0.584 + ], + "angle": 0, + "content": "According to DeepSeek-R1-Zero, it appears that accuracy and format rewards are enough to guide the model toward correct reasoning. However, during training on our problem, we observed instances of reward hacking, where the model optimizes the answer but the reasoning process leading to that answer is inconsistent with the answer itself.
We aim to ensure alignment between the model's reasoning process and its answer, both to enhance generalization and improve the interpretability of the reasoning process. As shown in Figure 5f, we employ GPT-4o to evaluate the proportion of logically consistent outputs on the test set before and after incorporating a logical consistency reward. This proportion increased from \\(46.01\\%\\) to \\(99.43\\%\\) after the reward was added, demonstrating the value of this approach in addressing embodied spatial multiple-choice reasoning tasks. Moreover, this reward mechanism could potentially be extended to other reasoning tasks prone to answer accuracy hacking during training." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.596, + 0.37, + 0.61 + ], + "angle": 0, + "content": "5.4 RL vs SFT when Generalizing to" + }, + { + "type": "title", + "bbox": [ + 0.125, + 0.612, + 0.482, + 0.627 + ], + "angle": 0, + "content": "Out-of-Distribution (OOD) Embodied Tasks?" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.631, + 0.483, + 0.825 + ], + "angle": 0, + "content": "For small-scale LMs, we aim to explore their generalization performance when trained with SFT instead of RL. To evaluate this, we introduced two OOD datasets: EgoSchema and the egocentric task in MVBench. As discussed in Section 4.1.1, these two OOD datasets differ significantly from the training set in both task content and scene characteristics. The accuracy results are shown in Figure 5g. RL-trained models demonstrate generalization ability across both datasets. On the EgoSchema dataset, the RL-trained language model under the Embodied-R framework even achieves performance comparable to the state-of-the-art multimodal reasoning model, Gemini-2.5-Pro. SFT-trained models showed improvement on EgoSchema but a decline on MVBench. This suggests that slow reasoning, as employed in RL models, could be a promising approach to improve the generalization capabilities even for small-scale models." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.836, + 0.208, + 0.85 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.855, + 0.483, + 0.897 + ], + "angle": 0, + "content": "To address embodied spatial reasoning tasks, we propose a collaborative framework that leverages the perceptual capabilities of large-scale VLMs and the reasoning potential of compact LMs." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.107, + 0.915, + 0.246 + ], + "angle": 0, + "content": "Through 90 hours of RL training on a 3B LM using 8 NVIDIA A800-SXM4-40GB GPUs, Embodied-R surpasses OpenAI-o1 by \\(13.9\\%\\) and Gemini-2.5-Pro by \\(10.3\\%\\) on the test set. Other key findings include: (1) RL training leads to output length convergence, aligning with the requirements of the task; (2) the reasoning upper bound of same-scale VLMs trained with RL is significantly lower than that of Embodied-R, due to inherent limitations in perception; (3) the proposed logical consistency reward enhances reasoning quality; and (4) models trained via RL exhibit stronger generalization on out-of-distribution datasets compared to those trained with SFT."
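To make the reward design discussed in Section 5.3 concrete, the sketch below shows the three reward terms together with the group-relative advantage used by GRPO. It is illustrative only: the `<think>`/`<answer>` template, the judge prompt, and all helper names are assumptions rather than the authors' exact implementation. The paper reports using GPT-4o to evaluate logical consistency, so the LLM judge shown here is one plausible way to produce that signal, not a confirmed detail of the training pipeline.

```python
import re
import statistics

def format_reward(response: str) -> float:
    # 1.0 if the response follows the think-then-answer template, else 0.0.
    ok = re.fullmatch(r"<think>.+?</think>\s*<answer>.+?</answer>",
                      response.strip(), re.DOTALL)
    return 1.0 if ok else 0.0

def accuracy_reward(response: str, ground_truth: str) -> float:
    # 1.0 if the option letter inside <answer> matches the ground-truth letter.
    m = re.search(r"<answer>\s*([A-E])", response)
    return 1.0 if (m and m.group(1) == ground_truth) else 0.0

def consistency_reward(response: str, judge) -> float:
    # Reward logical consistency between the reasoning span and the answer.
    # `judge` is any callable LLM interface returning text (hypothetical here).
    think = re.search(r"<think>(.+?)</think>", response, re.DOTALL)
    answer = re.search(r"<answer>(.+?)</answer>", response, re.DOTALL)
    if not (think and answer):
        return 0.0
    verdict = judge(
        "Reasoning:\n" + think.group(1)
        + "\n\nFinal answer: " + answer.group(1)
        + "\n\nDoes the reasoning logically lead to this exact answer? Reply yes or no."
    )
    return 1.0 if verdict.strip().lower().startswith("yes") else 0.0

def group_advantages(rewards: list[float]) -> list[float]:
    # GRPO-style advantages: normalize total rewards within one group of G rollouts.
    mu = statistics.fmean(rewards)
    sigma = statistics.pstdev(rewards) or 1.0  # guard against a zero-variance group
    return [(r - mu) / sigma for r in rewards]
```

One design note: keeping consistency as a separate term, rather than folding it into accuracy, is what makes the ratio tracked in Figure 5c observable, and it directly penalizes the reward-hacking pattern described above, where a correct option letter follows an unrelated chain of thought.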
+ }, + { + "type": "title", + "bbox": [ + 0.516, + 0.264, + 0.609, + 0.277 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.522, + 0.28, + 0.914, + 0.322 + ], + "angle": 0, + "content": "[1] Marah Abdin, Jyoti Aneja, Hany Awadalla, Ahmed Awadallah, Ammar Ahmad Awan, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Jianmin Bao, Harkirat Behl, et al. 2024. Phi-3 technical report: A highly capable language model locally on your phone. arXiv preprint arXiv:2404.14219 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.322, + 0.915, + 0.361 + ], + "angle": 0, + "content": "[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.362, + 0.914, + 0.402 + ], + "angle": 0, + "content": "[3] Michael Ahn, Debidatta Dwibedi, Chelsea Finn, Montse Gonzalez Arenas, Keerthana Gopalakrishnan, Karol Hausman, Brian Ichter, Alex Irpan, Nikhil Joshi, Ryan Julian, et al. 2024. Autort: Embodied foundation models for large scale orchestration of robotic agents. arXiv preprint arXiv:2401.12963 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.402, + 0.914, + 0.442 + ], + "angle": 0, + "content": "[4] Cameron A Aubin, Benjamin Gorissen, Edoardo Milana, Philip R Buskohl, Nathan Lazarus, Geoffrey A Slipher, Christoph Keplinger, Josh Bongard, Fumiya Iida, Jennifer A Lewis, et al. 2022. Towards enduring autonomous robots via embodied energy. Nature 602, 7897 (2022), 393-402." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.443, + 0.915, + 0.472 + ], + "angle": 0, + "content": "[5] Daichi Azuma, Taiki Miyanishi, Shuhei Kurita, and Motoaki Kawanabe. 2022. Scanqa: 3d question answering for spatial scene understanding. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 19129-19139." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.473, + 0.914, + 0.502 + ], + "angle": 0, + "content": "[6] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. 2025. Qwen2.5-vl technical report. arXiv preprint arXiv:2502.13923 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.503, + 0.914, + 0.542 + ], + "angle": 0, + "content": "[7] Keshigeyan Chandrasegaran, Agrim Gupta, Lea M Hadzic, Taran Kota, Jimming He, Cristóbal Eyzaguirre, Zane Durante, Manling Li, Jiajun Wu, and Fei-Fei Li. 2024. Hourvideo: 1-hour video-language understanding. Advances in Neural Information Processing Systems 37 (2024), 53168-53197." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.543, + 0.914, + 0.582 + ], + "angle": 0, + "content": "[8] Bolei Chen, Jiaxu Kang, Ping Zhong, Yixiong Liang, Yu Sheng, and Jianxin Wang. 2024. Embodied Contrastive Learning with Geometric Consistency and Behavioral Awareness for Object Navigation. In Proceedings of the 32nd ACM International Conference on Multimedia. 4776-4785." + }, + { + "type": "ref_text", + "bbox": [ + 0.523, + 0.583, + 0.914, + 0.623 + ], + "angle": 0, + "content": "[9] Boyuan Chen, Zhuo Xu, Sean Kirmani, Brian Ichter, Dorsa Sadigh, Leonidas Guibas, and Fei Xia. 2024. Spatialvlm: Endowing vision-language models with spatial reasoning capabilities. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 14455-14465."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.624, + 0.914, + 0.653 + ], + "angle": 0, + "content": "[10] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. 2025. R1-V: Reinforcing Super Generalization Ability in Vision-Language Models with Less Than $3. https://github.com/Deep-Agent/R1-V. Accessed: 2025-02-02." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.654, + 0.914, + 0.702 + ], + "angle": 0, + "content": "[11] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. 2024. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 24185-24198." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.704, + 0.914, + 0.743 + ], + "angle": 0, + "content": "[12] Sijie Cheng, Kichen Fang, Yangyang Yu, Sicheng Zhou, Bohao Li, Ye Tian, Tingguang Li, Lei Han, and Yang Liu. 2024. VidEgoThink: Assessing egocentric video understanding capabilities for embodied ai. arXiv preprint arXiv:2410.11623 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.744, + 0.914, + 0.784 + ], + "angle": 0, + "content": "[13] Stephanie Clarke and Judit Miklossy. 1990. Occipital cortex in man: Organization of callosal connections, related myelo-and cytoarchitecture, and putative boundaries of functional visual areas. Journal of Comparative Neurology 298, 2 (1990), 188-214." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.785, + 0.914, + 0.804 + ], + "angle": 0, + "content": "[14] Maël Donoso, Anne GE Collins, and Etienne Koechlin. 2014. Foundations of human reasoning in the prefrontal cortex. Science 344, 6191 (2014), 1481-1486." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.805, + 0.914, + 0.835 + ], + "angle": 0, + "content": "[15] Danny Driess, Fei Xia, Mehdi SM Sajjadi, Corey Lynch, Aakanksha Chowdhery, Ayzaan Wahid, Jonathan Tompson, Quan Vuong, Tianhe Yu, Wenlong Huang, et al. 2023. Palm-e: An embodied multimodal language model. (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.835, + 0.914, + 0.865 + ], + "angle": 0, + "content": "[16] Hao Fei, Shengqiong Wu, Wei Ji, Hanwang Zhang, Meishan Zhang, Mong-Li Lee, and Wynne Hsu. 2024. Video-of-thought: Step-by-step video reasoning from perception to cognition. arXiv preprint arXiv:2501.03230 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.519, + 0.866, + 0.914, + 0.895 + ], + "angle": 0, + "content": "[17] Nanyi Fei, Zhiwu Lu, Yizhao Gao, Guoxing Yang, Yuqi Huo, Jingyuan Wen, Haoyu Lu, Ruihua Song, Xin Gao, Tao Xiang, et al. 2022. Towards artificial general intelligence via a multimodal foundation model. Nature Communications" + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.28, + 0.915, + 0.895 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.11, + 0.195, + 0.12 + ], + "angle": 0, + "content": "13, 1 (2022), 3094." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.12, + 0.482, + 0.15 + ], + "angle": 0, + "content": "[18] Leonardo Fogassi, Pier Francesco Ferrari, Benno Gesierich, Stefano Rozzi, Fabian Chersi, and Giacomo Rizzolatti. 2005. Parietal lobe: from action organization to intention understanding. Science 308, 5722 (2005), 662-667." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.15, + 0.482, + 0.171 + ], + "angle": 0, + "content": "[19] Lucia Foglia and Robert A Wilson. 2013. Embodied cognition.
Wiley Interdisciplinary Reviews: Cognitive Science 4, 3 (2013), 319-325." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.171, + 0.482, + 0.211 + ], + "angle": 0, + "content": "[20] Chen Gao, Baining Zhao, Weichen Zhang, Jinzhu Mao, Jun Zhang, Zhiheng Zheng, Fanhang Man, Jianjie Fang, Zile Zhou, Jinqiang Cui, et al. 2024. EmbodiedCity: A Benchmark Platform for Embodied Agent in Real-world City Environment. arXiv preprint arXiv:2410.09604 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.211, + 0.482, + 0.231 + ], + "angle": 0, + "content": "[21] Google. 2024. Gemini API. https://ai.google.dev/gemini-api. Accessed: 2025-04-12." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.231, + 0.482, + 0.28 + ], + "angle": 0, + "content": "[22] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, et al. 2022. Ego4d: Around the world in 3,000 hours of egocentric video. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 18995-19012." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.281, + 0.482, + 0.312 + ], + "angle": 0, + "content": "[23] Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. 2025. rStar-Math: Small LLMs Can Master Math Reasoning with Self-Evolved Deep Thinking. arXiv preprint arXiv:2501.04519 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.312, + 0.482, + 0.352 + ], + "angle": 0, + "content": "[24] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.352, + 0.482, + 0.382 + ], + "angle": 0, + "content": "[25] Shima Imani, Liang Du, and Harsh Shrivastava. 2023. Mathprompter: Mathematical reasoning using large language models. arXiv preprint arXiv:2303.05398 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.382, + 0.482, + 0.402 + ], + "angle": 0, + "content": "[26] James Intriligator and Patrick Cavanagh. 2001. The spatial resolution of visual attention. Cognitive psychology 43, 3 (2001), 171-216." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.402, + 0.482, + 0.442 + ], + "angle": 0, + "content": "[27] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. 2024. MVBench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 22195-22206." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.442, + 0.482, + 0.472 + ], + "angle": 0, + "content": "[28] Tianlin Li, Qian Liu, Tianyu Pang, Chao Du, Qing Guo, Yang Liu, and Min Lin. 2024. Purifying large language models by ensembling a small language model. arXiv preprint arXiv:2402.14845 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.472, + 0.482, + 0.502 + ], + "angle": 0, + "content": "[29] Bin Lin, Yang Ye, Bin Zhu, Jiaxi Cui, Munan Ning, Peng Jin, and Li Yuan. 2023. Video-llava: Learning united visual representation by alignment before projection. arXiv preprint arXiv:2311.10122 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.503, + 0.482, + 0.523 + ], + "angle": 0, + "content": "[30] Fangyu Liu, Guy Emerson, and Nigel Collier. 2023. Visual spatial reasoning.
Transactions of the Association for Computational Linguistics 11 (2023), 635-651." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.523, + 0.482, + 0.553 + ], + "angle": 0, + "content": "[31] Hongbin Liu, Yongze Zhao, Peng Dong, Xiuyi Guo, and Yilin Wang. 2024. IOFTracker: A Two-Stage Multiple Targets Tracking Method Using Spatial-Temporal Fusion Algorithm. Applied Sciences 15, 1 (2024), 107." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.553, + 0.482, + 0.583 + ], + "angle": 0, + "content": "[32] Yang Liu, Weixing Chen, Yongjie Bai, Xiaodan Liang, Guanbin Li, Wen Gao, and Liang Lin. 2024. Aligning cyber space with physical world: A comprehensive survey on embodied ai. arXiv preprint arXiv:2407.06886 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.583, + 0.482, + 0.613 + ], + "angle": 0, + "content": "[33] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. 2025. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.613, + 0.482, + 0.653 + ], + "angle": 0, + "content": "[34] Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. 2023. Egoschema: A diagnostic benchmark for very long-form video language understanding. Advances in Neural Information Processing Systems 36 (2023), 46212-46244." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.654, + 0.482, + 0.693 + ], + "angle": 0, + "content": "[35] Yingqian Min, Zhipeng Chen, Jinhao Jiang, Jie Chen, Jia Deng, Yiwen Hu, Yiru Tang, Jiapeng Wang, Xiaoxue Cheng, Huatong Song, et al. 2024. Imitate, explore, and self-improve: A reproduction report on slow-thinking reasoning systems. arXiv preprint arXiv:2412.09413 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.693, + 0.482, + 0.735 + ], + "angle": 0, + "content": "[36] Yao Mu, Qinglong Zhang, Mengkang Hu, Wenhai Wang, Mingyu Ding, Jun Jin, Bin Wang, Jifeng Dai, Yu Qiao, and Ping Luo. 2023. Embodiedgpt: Vision-language pre-training via embodied chain of thought. Advances in Neural Information Processing Systems 36 (2023), 25081-25094." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.735, + 0.464, + 0.745 + ], + "angle": 0, + "content": "[37] OpenAI. 2024. GPT-4o API. https://openai.com/api/. Accessed: 2025-04-12." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.745, + 0.482, + 0.764 + ], + "angle": 0, + "content": "[38] OpenAI. 2024. Learning to Reason with LLMs. https://openai.com/index/learning-to-reason-with-llms/ Accessed: 2025-03-04." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.764, + 0.482, + 0.783 + ], + "angle": 0, + "content": "[39] OpenAI. 2025. OpenAI o3-mini. https://openai.com/index/openai-o3-mini/ Accessed: 2025-04-15." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.784, + 0.482, + 0.824 + ], + "angle": 0, + "content": "[40] Amrith Setlur, Saurabh Garg, Xinyang Geng, Naman Garg, Virginia Smith, and Aviral Kumar. 2025. RL on incorrect synthetic data scales the efficiency of llm math reasoning by eight-fold. Advances in Neural Information Processing Systems 37 (2025), 43000-43031." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.825, + 0.482, + 0.854 + ], + "angle": 0, + "content": "[41] Dhruv Shah, Blazej Osinski, Sergey Levine, et al. 2023. Lm-nav: Robotic navigation with large pre-trained models of language, vision, and action. In Conference on robot learning. PMLR, 492–504."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.854, + 0.482, + 0.895 + ], + "angle": 0, + "content": "[42] Alessandro Suglia, Claudio Greco, Katie Baker, Jose L Part, Ioannis Papaioannou, Arash Eshghi, Ioannis Konstas, and Oliver Lemon. 2024. Alanavlm: A multimodal embodied ai foundation model for egocentric video understanding. arXiv preprint arXiv:2406.13807 (2024)." + }, + { + "type": "list", + "bbox": [ + 0.087, + 0.11, + 0.482, + 0.895 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.11, + 0.913, + 0.14 + ], + "angle": 0, + "content": "[43] Guangzhi Sun, Yudong Yang, Jimin Zhuang, Changli Tang, Yixuan Li, Wei Li, Zejun MA, and Chao Zhang. 2025. video-SALMONN-o1: Reasoning-enhanced Audio-visual Large Language Model. arXiv preprint arXiv:2502.11775 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.141, + 0.913, + 0.18 + ], + "angle": 0, + "content": "[44] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. 2023. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.18, + 0.913, + 0.211 + ], + "angle": 0, + "content": "[45] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. 2025. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.211, + 0.913, + 0.231 + ], + "angle": 0, + "content": "[46] Qwen Team. 2024. Qwen-VL-Max. https://qwenlm.github.io/blog/qwen-vl-max/. Accessed: 2025-04-12." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.231, + 0.913, + 0.251 + ], + "angle": 0, + "content": "[47] Qwen Team. 2024. QwQ: Reflect Deeply on the Boundaries of the Unknown. https://qwenlm.github.io/blog/qwq-32b-preview/" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.251, + 0.913, + 0.292 + ], + "angle": 0, + "content": "[48] Omkar Thawakar, Dinura Dissanayake, Ketan More, Ritesh Thawkar, Ahmed Heakl, Noor Ahsan, Yuhao Li, Mohammed Zumri, Jean Lahoud, Rao Muhammad Anwer, et al. 2025. Llamav-o1: Rethinking step-by-step visual reasoning in llms. arXiv preprint arXiv:2501.06186 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.292, + 0.913, + 0.322 + ], + "angle": 0, + "content": "[49] Dennis Ulmer, Martin Gubri, Hwaran Lee, Sangdoo Yun, and Seong Joon Oh. 2024. Calibrating large language models using their generations only. arXiv preprint arXiv:2403.05973 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.322, + 0.913, + 0.372 + ], + "angle": 0, + "content": "[50] Fali Wang, Zhiwei Zhang, Xianren Zhang, Zongyu Wu, Tzuhao Mo, Qiuhao Lu, Wanjing Wang, Rui Li, Junjie Xu, Xianfeng Tang, et al. 2024. A comprehensive survey of small language models in the era of large language models: Techniques, enhancements, applications, collaboration with llms, and trustworthiness. arXiv preprint arXiv:2411.03350 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.372, + 0.913, + 0.412 + ], + "angle": 0, + "content": "[51] Jiayu Wang, Yifei Ming, Zhenmei Shi, Vibhav Vineet, Xin Wang, Sharon Li, and Neel Joshi. 2024. Is a picture worth a thousand words? delving into spatial reasoning for vision language models. Advances in Neural Information Processing Systems 37 (2024), 75392-75421."
+ }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.412, + 0.913, + 0.462 + ], + "angle": 0, + "content": "[52] Tai Wang, Xiaohan Mao, Chenming Zhu, Runsen Xu, Ruiyuan Lyu, Peisen Li, Xiao Chen, Wenwei Zhang, Kai Chen, Tianfan Xue, et al. 2024. Embodiedscan: A holistic multi-modal 3d perception suite towards embodied ai. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 1975-1976." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.462, + 0.913, + 0.502 + ], + "angle": 0, + "content": "[53] Zhecan Wang, Garrett Bingham, Adams Wei Yu, Quoc V Le, Thang Luong, and Golnaz Ghiasi. 2024. Haloquest: A visual hallucination dataset for advancing multimodal reasoning. In European Conference on Computer Vision. Springer, 288-304." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.503, + 0.913, + 0.542 + ], + "angle": 0, + "content": "[54] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems 35 (2022), 24824-24837." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.543, + 0.913, + 0.583 + ], + "angle": 0, + "content": "[55] Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. 2025. Logic-RL: Unleashing LLM Reasoning with Rule-Based Reinforcement Learning. arXiv preprint arXiv:2502.14768 (2025)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.583, + 0.913, + 0.633 + ], + "angle": 0, + "content": "[56] Cheng Xu, Xiaofeng Hou, Jiacheng Liu, Chao Li, Tianhao Huang, Xiaozhi Zhu, Mo Niu, Lingyu Sun, Peng Tang, Tongqiao Xu, et al. 2023. Mmbench: Benchmarking end-to-end multi-modal dnns and understanding their hardware-software implications. In 2023 IEEE International Symposium on Workload Characterization (IISWC). IEEE, 154-166." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.633, + 0.913, + 0.673 + ], + "angle": 0, + "content": "[57] An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. 2024. Qwen2.5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.673, + 0.913, + 0.704 + ], + "angle": 0, + "content": "[58] Jihan Yang, Shusheng Yang, Anjali W Gupta, Rilyn Han, Li Fei-Fei, and Saining Xie. 2024. Thinking in space: How multimodal large language models see, remember, and recall spaces. arXiv preprint arXiv:2412.14171 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.704, + 0.913, + 0.744 + ], + "angle": 0, + "content": "[59] Weihao Zeng, Yuzhen Huang, Wei Liu, Keqing He, Qian Liu, Zejun Ma, and Junxian He. 2025. 7B Model and 8K Examples: Emerging Reasoning with Reinforcement Learning is Both Effective and Efficient. https://hkust-nlp.notion.site/simplerl-reason. Notion Blog." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.744, + 0.913, + 0.774 + ], + "angle": 0, + "content": "[60] Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. 2025. Rest-mcts*: Llm self-training via process reward guided tree search. Advances in Neural Information Processing Systems 37 (2025), 64735-64772." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.774, + 0.913, + 0.794 + ], + "angle": 0, + "content": "[61] Yiming Zhang, Nicholas Carlini, and Daphne Ippolito.
2023. Effective prompt extraction from language models. arXiv preprint arXiv:2307.06865 (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.794, + 0.913, + 0.844 + ], + "angle": 0, + "content": "[62] Baining Zhao, Jianjie Fang, Zichao Dai, Ziyou Wang, Jirong Zha, Weichen Zhang, Chen Gao, Yue Wang, Jinqiang Cui, Xinlei Chen, and Yong Li. 2025. UrbanVideo-Bench: Benchmarking Vision-Language Models on Embodied Intelligence with Video Data in Urban Spaces. arXiv:2503.06157 [cs.CV] https://arxiv.org/abs/2503.06157" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.844, + 0.913, + 0.875 + ], + "angle": 0, + "content": "[63] Theodore Zhao, Mu Wei, J Samuel Preston, and Hoifung Poon. 2023. Automatic Calibration and Error Correction for Generative Large Language Models via Pareto Optimal Self-Supervision. (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.875, + 0.913, + 0.895 + ], + "angle": 0, + "content": "[64] Karl Zilles and Katrin Amunts. 2010. Centenary of Brodmann's map—conception and fate. Nature Reviews Neuroscience 11, 2 (2010), 139-145." + }, + { + "type": "list", + "bbox": [ + 0.518, + 0.11, + 0.913, + 0.895 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "title", + "bbox": [ + 0.084, + 0.106, + 0.201, + 0.123 + ], + "angle": 0, + "content": "A Appendix" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.126, + 0.303, + 0.141 + ], + "angle": 0, + "content": "A.1 Dataset Introduction" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.144, + 0.483, + 0.31 + ], + "angle": 0, + "content": "UrbanVideo-Bench: UrbanVideo-Bench is one of the training and testing datasets designed for embodied reasoning (embodied-r). This benchmark was proposed by Tsinghua University in February 2025. It captures two embodied characteristics of urban environments: complex urban scenes featuring dynamic and static elements, and unique aerial navigation scenarios. The dataset consists of 4 categories and 16 tasks, aimed at evaluating Video-LLMs in terms of recall, perception, reasoning, and navigation capabilities. In our paper, we focus on 4 of these complex tasks for reinforcement learning in video-based learning: Landmark Position, Counterfactual Reasoning, Progress Evaluation, and Action Generation, which represent challenging embodied outdoor tasks." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.311, + 0.483, + 0.449 + ], + "angle": 0, + "content": "VSI-Bench: VSI-Bench is another training and testing dataset for embodied reasoning (embodied-r). Proposed by Fei-Fei Li's team at Stanford in December 2024, this benchmark provides high-quality evaluation metrics for assessing the 3D, video-based, visual-spatial intelligence of multimodal large language models (MLLMs). The dataset comprises 2 categories and 8 tasks designed to evaluate key aspects of spatial reasoning. In our paper, we focus on 4 tasks for reinforcement learning in video-based learning: Relative Distance, Relative Direction, Route Planning, and Appearance Order, all of which are categorized as challenging embodied outdoor tasks." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.45, + 0.483, + 0.546 + ], + "angle": 0, + "content": "EgoSchema: EgoSchema is one of the Out-of-Distribution (OOD) datasets utilized to evaluate the generalization capability of our model. This dataset is specifically designed as a long-form video question-answering benchmark, aimed at assessing modern vision and language systems' ability to understand and reason over extended video content. 
It provides a rigorous evaluation framework for long video understanding tasks." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.546, + 0.483, + 0.63 + ], + "angle": 0, + "content": "MVBench: MVBench is another Out-of-Distribution (OOD) dataset employed to test the generalization capability of our model. MVBench consists of 20 complex video tasks, offering a comprehensive benchmark for evaluating the video understanding capabilities of existing multimodal models. This dataset is designed to address diverse and challenging scenarios in video-based reasoning." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.641, + 0.385, + 0.656 + ], + "angle": 0, + "content": "A.2 Details of Key-Frame Extractor" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.659, + 0.483, + 0.701 + ], + "angle": 0, + "content": "The goal of key-frame extraction is to ensure sufficient information gain between frames while maintaining a certain degree of overlap. The specific process is as follows:" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.701, + 0.483, + 0.77 + ], + "angle": 0, + "content": "Step 1: A perspective transformation is used to model the geometric relationship between frames. Assuming \\( f_{t} \\) is a key-frame, to determine whether \\( f_{t + 1} \\) should also be considered a keyframe, keypoints and descriptors are calculated from \\( f_{t} \\) and \\( f_{t + 1} \\) using the Oriented FAST and Rotated BRIEF (ORB) algorithm:" + }, + { + "type": "equation", + "bbox": [ + 0.174, + 0.775, + 0.483, + 0.79 + ], + "angle": 0, + "content": "\\[\n\\text{Keypoints}_{t}, \\text{Descriptors}_{t} = \\mathrm{ORB}(f_{t}), \\tag{9}\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.154, + 0.794, + 0.482, + 0.809 + ], + "angle": 0, + "content": "\\[\n\\text{Keypoints}_{t+1}, \\text{Descriptors}_{t+1} = \\mathrm{ORB}(f_{t+1}). \\tag{10}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.811, + 0.483, + 0.897 + ], + "angle": 0, + "content": "Next, a feature matching algorithm, such as the Brute-Force Matcher, is applied to match the descriptors between the two frames, identifying corresponding keypoint pairs \\(\\mathbf{l}_t^{\\mathrm{key}}\\) and \\(\\mathbf{l}_{t + 1}^{\\mathrm{key}}\\). Using the matched keypoint pairs, the Random Sample Consensus (RANSAC) algorithm is employed to estimate the homography matrix \\(\\mathbf{M}\\), which maps the content of \\(f_{t + 1}\\) to the coordinate space of \\(f_t\\)." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.107, + 0.915, + 0.306 + ], + "angle": 0, + "content": "Step 2: The overlap ratio between two frames is then computed. Assuming the size of each video frame is \\( w \\times h \\), for frames \\( f_{t} \\) and \\( f_{t+1} \\): \\( \\mathbf{l}_{t} = \\{ [0,0], [w,0], [w,h], [0,h] \\} \\) represents the four corner points of \\( f_{t} \\); \\( \\mathbf{l}_{t+1} = \\{ [0,0], [w,0], [w,h], [0,h] \\} \\) represents the four corner points of \\( f_{t+1} \\). Using the homography matrix \\( \\mathbf{M} \\), the corner points \\( \\mathbf{l}_{t+1} \\) of \\( f_{t+1} \\) are transformed into the coordinate space of \\( f_{t} \\): \\( \\mathbf{l}_{t+1,i}' = \\mathbf{M} \\cdot \\mathbf{l}_{t+1,i} \\), where \\( \\mathbf{l}_{t+1,i} = [x,y,1]^T \\) represents the corner points of \\( f_{t+1} \\) in homogeneous coordinates, and \\( \\mathbf{l}_{t+1,i}' = [x',y',w']^T \\) represents the transformed corner points.
The transformed points are further normalized to recover 2D coordinates, resulting in a quadrilateral representing \\( f_{t+1} \\) in \\( f_{t} \\)'s space. In \\( f_{t} \\)'s coordinate space, there are two polygons: Polygon \\( L_{t} \\) is defined by the corner points \\( \\mathbf{l}_{t} \\) of \\( f_{t} \\); Polygon \\( L_{t+1}' \\) is defined by the transformed corner points \\( \\mathbf{l}_{t+1}' \\). Thus, the overlap ratio \\( c \\) is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.648, + 0.315, + 0.913, + 0.348 + ], + "angle": 0, + "content": "\\[\nc = \\frac{\\operatorname{Area}\\left(L_{t} \\cap L_{t+1}^{\\prime}\\right)}{\\operatorname{Area}_{\\text{total}}}. \\tag{11}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.356, + 0.915, + 0.508 + ], + "angle": 0, + "content": "If \\( c \\) is less than a predefined threshold \\( \\varepsilon \\), it indicates significant visual changes between the frames, and \\( f_{t+1} \\) is marked as a key-frame. Otherwise, the algorithm proceeds to calculate the overlap ratio between \\( f_t \\) and \\( f_{t+2} \\). This process continues until a new key-frame is identified, which then becomes the reference for subsequent frames. Considering the effect of viewpoint changes, rotations (both horizontal and vertical) result in larger field-of-view variations, leading to more frames being recorded during these movements. If the indices of the extracted keyframes are denoted as \\( \\mathbf{f}' = [f_{k_0}, f_{k_1}, \\dots, f_{k_n}] \\), the keyframe extraction process can be summarized as:" + }, + { + "type": "equation", + "bbox": [ + 0.651, + 0.516, + 0.913, + 0.53 + ], + "angle": 0, + "content": "\\[\n\\mathbf{f}^{\\prime} = \\operatorname{K\\text{-}Extractor}(\\mathbf{f}). \\tag{12}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.546, + 0.785, + 0.562 + ], + "angle": 0, + "content": "A.3 Details of Data Preparation" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.564, + 0.913, + 0.619 + ], + "angle": 0, + "content": "A.3.1 Task Selection Criteria. In our study, we carefully selected specific tasks that emphasize spatial reasoning capabilities during motion within three-dimensional physical space. The selection process was guided by several key considerations:" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.62, + 0.915, + 0.744 + ], + "angle": 0, + "content": "Focus on Reasoning Processes: We prioritized tasks that require deep cognitive processing rather than simple recognition or recall. As highlighted in the main text, embodied spatial reasoning involves complex spatio-temporal relationships where agents must discover object associations across frames and extract task-relevant semantics. For instance, navigation tasks require agents to infer their location from historical observations, construct mental maps, develop high-level plans, and determine specific actions—processes that demand sophisticated reasoning capabilities." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.744, + 0.915, + 0.827 + ], + "angle": 0, + "content": "Diversity in Spatial Contexts: To ensure comprehensive evaluation, we selected tasks from both indoor (VSI-Bench) and outdoor (UrbanVideo-Bench) environments, providing diverse spatial contexts that test different aspects of embodied reasoning. This diversity is crucial for evaluating the generalizability of our approach across varying spatial scales and environmental complexities."
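As a concrete companion to the key-frame procedure of A.2 (Eqs. 9-12), here is a minimal OpenCV sketch of one extraction pass. It is illustrative rather than the exact implementation: the ORB settings, the overlap threshold value, and the use of the full frame area w*h as Area_total are assumptions.

```python
import cv2
import numpy as np

def _gray(img):
    # ORB expects a single-channel image; convert if the frame is BGR.
    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if img.ndim == 3 else img

def overlap_ratio(frame_t, frame_t1):
    """Overlap ratio c between f_t and f_{t+1} (Eqs. 9-11)."""
    orb = cv2.ORB_create(nfeatures=1000)
    kp1, des1 = orb.detectAndCompute(_gray(frame_t), None)    # Eq. (9)
    kp2, des2 = orb.detectAndCompute(_gray(frame_t1), None)   # Eq. (10)
    if des1 is None or des2 is None:
        return 0.0  # no texture to match: treat as a large visual change
    matches = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True).match(des1, des2)
    if len(matches) < 4:
        return 0.0  # a homography needs at least four correspondences
    src = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    dst = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    M, _ = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)  # maps f_{t+1} -> f_t space
    if M is None:
        return 0.0
    h, w = frame_t.shape[:2]
    corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]]).reshape(-1, 1, 2)
    warped = cv2.perspectiveTransform(corners, M)         # l'_{t+1} = M . l_{t+1}
    # Intersection of polygon L_t with the warped polygon L'_{t+1}; for a sketch
    # we assume both quadrilaterals are convex and take Area_total = w * h.
    inter_area, _ = cv2.intersectConvexConvex(corners.reshape(-1, 2),
                                              warped.reshape(-1, 2))
    return float(inter_area) / (w * h)                    # Eq. (11)

def extract_keyframes(frames, eps=0.6):
    """K-Extractor (Eq. 12); the threshold eps (epsilon) is a free choice."""
    keyframes = [0]
    for t in range(1, len(frames)):
        if overlap_ratio(frames[keyframes[-1]], frames[t]) < eps:
            keyframes.append(t)
    return keyframes
```

Because rotations shrink the field-of-view overlap faster than forward motion does, this rule naturally records more frames during turns, matching the behavior described above.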
+ }, + { + "type": "text", + "bbox": [ + 0.513, + 0.828, + 0.915, + 0.897 + ], + "angle": 0, + "content": "Emphasis on Long Reasoning Chains: We specifically targeted tasks characterized by long spatial reasoning chains and historically low accuracy rates. These challenging tasks better demonstrate the value of our \"slow thinking\" approach, which encourages thorough reasoning before generating responses—similar to how" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.083, + 0.105, + 0.483, + 0.133 + ], + "angle": 0, + "content": "Table 4: Hyperparameters used in reinforcement learning training of Embodied-R." + }, + { + "type": "table", + "bbox": [ + 0.158, + 0.146, + 0.411, + 0.3 + ], + "angle": 0, + "content": "
<table><tr><td>Hyperparameter</td><td>Value</td></tr>
<tr><td>Optimizer</td><td>AdamW</td></tr>
<tr><td>Learning Rate</td><td>5e-7</td></tr>
<tr><td>Temperature</td><td>1.0</td></tr>
<tr><td>Train Batch Size</td><td>32</td></tr>
<tr><td>Rollout Size</td><td>8</td></tr>
<tr><td>KL Coefficient</td><td>0.001</td></tr>
<tr><td>Maximum Response Length</td><td>2048</td></tr>
<tr><td>Input Length</td><td>6144</td></tr>
<tr><td>Training Epochs</td><td>12</td></tr></table>
" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.315, + 0.483, + 0.342 + ], + "angle": 0, + "content": "recent advances in mathematical and scientific reasoning have benefited from reinforcement learning techniques." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.343, + 0.483, + 0.399 + ], + "angle": 0, + "content": "Deterministic Evaluation: All selected tasks were formulated as multiple-choice question-answering problems to ensure determinism in answers, facilitating both RL training and direct calculation of accuracy for performance evaluation." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.405, + 0.482, + 0.446 + ], + "angle": 0, + "content": "A.3.2 Question Filtering Methodology. To ensure the quality and validity of our dataset, we implemented a rigorous question filtering process:" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.447, + 0.483, + 0.529 + ], + "angle": 0, + "content": "Blind Testing Filter: We first evaluated questions using an untrained 7B language model without video input (blind selection). Questions that could be correctly answered without visual information were identified as potentially problematic, as they might rely more on textual patterns or common knowledge rather than genuine spatial reasoning based on video content." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.53, + 0.483, + 0.559 + ], + "angle": 0, + "content": "SFT-based Filtering: After conducting supervised fine-tuning (SFT) without video inputs, we analyzed which question types" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.107, + 0.915, + 0.176 + ], + "angle": 0, + "content": "showed significant improvement in accuracy. Categories where the model's performance increased substantially without visual information were flagged for removal, as this indicated strong correlations between question text and answers that could be exploited without actual spatial reasoning." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.177, + 0.915, + 0.203 + ], + "angle": 0, + "content": "Correlation Analysis: We specifically eliminated question types where:" + }, + { + "type": "text", + "bbox": [ + 0.547, + 0.207, + 0.912, + 0.233 + ], + "angle": 0, + "content": "- The model could achieve high accuracy without accessing video content" + }, + { + "type": "text", + "bbox": [ + 0.547, + 0.234, + 0.913, + 0.261 + ], + "angle": 0, + "content": "- Performance improved dramatically after text-only SFT training" + }, + { + "type": "text", + "bbox": [ + 0.547, + 0.262, + 0.913, + 0.29 + ], + "angle": 0, + "content": "- Question-answer pairs exhibited strong textual patterns that could be exploited without spatial understanding" + }, + { + "type": "list", + "bbox": [ + 0.547, + 0.207, + 0.913, + 0.29 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.292, + 0.915, + 0.376 + ], + "angle": 0, + "content": "This filtering methodology ensured that our final dataset genuinely tests embodied spatial reasoning capabilities rather than linguistic pattern matching or prior knowledge exploitation. By removing questions with strong text-answer correlations, we created a more challenging and valid benchmark that requires models to truly understand spatial relationships from video content." 
+ }, + { + "type": "title", + "bbox": [ + 0.515, + 0.387, + 0.735, + 0.403 + ], + "angle": 0, + "content": "A.4 RL Hyperparameters" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.405, + 0.915, + 0.558 + ], + "angle": 0, + "content": "The reinforcement learning (RL) training of Embodied-R requires careful hyperparameter tuning to balance computational efficiency with model performance. We conducted extensive experiments to determine the optimal configuration for our collaborative framework. The key hyperparameters used in our RL training process are summarized in Table 4. These settings were selected to ensure stable training while maximizing the model's embodied reasoning capabilities. Notably, we used a relatively small learning rate (5e-7) to prevent catastrophic forgetting and a moderate KL coefficient (0.001) to maintain proximity to the reference model while allowing sufficient exploration." + } + ] +]
\ No newline at end of file
diff --git a/data/2025/2504_12xxx/2504.12680/04e3b704-b5e8-4635-9499-209e4f014c85_origin.pdf b/data/2025/2504_12xxx/2504.12680/04e3b704-b5e8-4635-9499-209e4f014c85_origin.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..d1ceb7b82a4c819a30246b864941df32b4ff38ec
--- /dev/null
+++ b/data/2025/2504_12xxx/2504.12680/04e3b704-b5e8-4635-9499-209e4f014c85_origin.pdf
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1695b1f4306c0adc2e481c9f0520ba32ea19e2280a6a56f31e5bac5ffb4236c6
+size 28081509
diff --git a/data/2025/2504_12xxx/2504.12680/full.md b/data/2025/2504_12xxx/2504.12680/full.md
new file mode 100644
index 0000000000000000000000000000000000000000..86ce284a79d567803f14700f2bd58e278a03145e
--- /dev/null
+++ b/data/2025/2504_12xxx/2504.12680/full.md
@@ -0,0 +1,522 @@

# Embodied-R: Collaborative Framework for Activating Embodied Spatial Reasoning in Foundation Models via Reinforcement Learning

Baining Zhao*, Ziyou Wang*, Jianjie Fang*, Chen Gao†, Fanghang Man, Jinqiang Cui, Xin Wang, Xinlei Chen†, Yong Li, Wenwu Zhu

Tsinghua University

![](images/260ca3540d2ad6cd03f0d4ebb90864c70e7bb2a3f3d5777757b66e28572d71f1.jpg)

Project Page

![](images/7466a5b5cddbe1975919ac7f8fb16269bedbff7a95fb25f6839397ba74145802.jpg)

Code

# Tasks

![](images/b26be9858af5be7bd451a3520f190c77d197a842af79e537534c1d3cdadee387.jpg)

① Landmark Position: What is your current position relative to [landmark] in [navigation instruction]?

② Counterfactual Reasoning: Can you still reach the destination if moving in another direction?

③ Progress Evaluation: Which step is the navigation currently performing in [navigation instruction]?

④ Action Generation: What is your next action given [navigation instruction]?

⑤ Relative Distance: Which object is the closest to [object A]?

⑥ Relative Direction: If you are standing by [object A] and facing [object B], is the [object C] to your left, right, or back?

⑦ Route Planning: How to navigate to [object A]?

⑧ Appearance Order: What will be the first-time appearance order of [object A, object B, object C] in your memory?

![](images/8066252818aa0f756bf50e09469d3ee84df1cccc7b9c939aa9775510b37a56f7.jpg)
Embodied-R

# Reasoning

![](images/a7afab221f6d6c3288ef9526fa3ff072372cb905f27e3f3fa7b35a2294a9b74a.jpg)

[Example] Question: Navigation Instruction given at initial position: [Observe around, then fly towards the road, then turn left and land on the roof of the building on the left]. What are you doing now?

Choose: A. I look around the square area.
B. I fly towards the road.

C. I turn left and land on the roof of the building on the left.

D. I fly over the park. E. I land.

![](images/8fe50a38bdcaff17c4598a1be54b92b276eda1fc2d6c11ae4b445b89417288e8.jpg)

Fast-Thinking

Slow-Thinking

Think: Let's analyze the event sequence and the instruction step by step:

1. Initial Position: I start by observing around the square area.

2. Next Step: After observing, I fly towards the highway.

3. Subsequent Step: Once at the highway, I should turn left.

4. Final Step: Finally, I should land on the roof of the building on the left.

From the video frames:

- The initial frames show buildings and a square area, indicating the observation phase.

- The subsequent frames show a transition from the square area to a broader view.

- The later frames depict a clear view of the road and surrounding buildings.

![](images/6b2092ae3ac9d8a38a7043afa27952f1f670fafcd1820ad106db5ceb62650bc1.jpg)
Figure 1: Embodied spatial reasoning: tasks and thinking process. Challenging tasks from public embodied video datasets are identified, encompassing both indoor and outdoor scenarios. We introduce slow-thinking to improve reasoning performance.

Answer: B. I fly towards the road.

![](images/0690f619416d8140759d8bfe85c98be41326383f16d97579a0edd83c69a8451f.jpg)

# Abstract

Humans can perceive and reason about spatial relationships from sequential visual observations, such as egocentric video streams. However, how pretrained models acquire such abilities, especially high-level reasoning, remains unclear. This paper introduces Embodied-R, a collaborative framework combining large-scale Vision-Language Models (VLMs) for perception and small-scale Language Models (LMs) for reasoning. Using Reinforcement Learning (RL) with a novel reward system considering think-answer logical consistency, the model achieves slow-thinking capabilities with limited computational resources. After training on only 5k embodied video samples, Embodied-R with a 3B LM matches state-of-the-art multimodal reasoning models (OpenAI-o1, Gemini-2.5-Pro) on both in-distribution and out-of-distribution embodied spatial reasoning tasks. Embodied-R also exhibits emergent thinking patterns such as systematic analysis and contextual integration. We further explore research questions including response length, training on VLM, strategies for reward design, and differences in model generalization after SFT (Supervised Fine-Tuning) and RL training.

# 1 Introduction

On the path toward Artificial General Intelligence (AGI) [17], we hope that pre-trained foundation models can not only perform tasks such as dialogue and image understanding in the cyber world [2, 44] but also develop human-like embodied spatial cognition in the three-dimensional physical world, enabling them to perceive, think, and move [4, 32]. The fundamental way humans achieve spatial cognition is through continuous, dynamic visual observations, akin to video streams [26, 30]. For example, by observing their surroundings, humans can infer their position relative to nearby objects. Similarly, based on historical visual observations, humans can determine the actions they should take to reach a target destination.

Visual spatial cognition can be divided into two levels: perception and reasoning [51]. Perception refers to "what is seen", characterized by direct, low-level tasks such as object recognition, edge detection, or color differentiation [52].
Reasoning, on the other hand, involves "what is understood" and "what actions to take", which are indirect and higher-level tasks requiring logical inference and knowledge integration [62]. Examples of reasoning include "Where did I come from?" (e.g., recalling historical movement trajectories [36]), "Where am I?" (e.g., inferring the spatial relationships between nearby objects and distances [5]), and "Where do I want to go?" (e.g., planning actions and deciding movements to reach a destination [8]). While most existing research focuses on improving the perception capabilities of foundation models [6, 11], with notable progress, their spatial reasoning abilities remain limited [9, 58], and methods for enhancement are largely unexplored. + +Specifically, video-based spatial reasoning poses several challenges, as follows: + +- Reasoning is always built upon perception [19, 32]. For the studied problem, continuous visual observations impose higher demands on perception. Reasoning cannot be well achieved with faulty perceptions or hallucinations [53]. It is challenging to reason when it is already hard to perceive from the videos. +- Video data naturally involves complex spatio-temporal relationships, requiring the discovery of object associations across frames and the extraction of semantics relevant to the reasoning task [16]. For instance, to navigate to a destination outside the current field of view, one must infer their location from historical visual observations, build a mental map of the environment, develop a high-level plan to determine the direction, and finally decide on specific actions to execute. Existing supervised fine-tuning (SFT) training methods lack supervision for the reasoning process, making it difficult to handle such reasoning tasks [62]. +- Embodied visual observations have distinct characteristics. First, understanding disembodied videos, such as movies or TV shows, primarily emphasizes the content within the video, often from a broad and objective perspective [27]. In contrast, egocentric videos focus on understanding the relationship between the observer and the surrounding environment, often from a constrained first-person perspective [22]. Second, embodied continuous visual observations are generated over time, indicating that embodied perception should rely on sequential inputs rather than aggregating all visual observations for a single input after a prolonged period [31]. Finally, due to the continuity of motion in the physical world, egocentric visual observations also exhibit spatial continuity, meaning there is significant redundancy and repetition between frames. Consequently, directly applying existing multimodal large language models (MLLMs) to embodied videos leads to issues, including loss of generalization and input token limits caused by excessive redundant frames [1, 29]. + +Recently, the impressive performance of OpenAI's o1/o3 [38] and DeepSeek-R1 [24] in solving complex reasoning problems(e.g., mathematics, coding, science, etc.) has drawn attention to reinforcement learning (RL) techniques. By incorporating the chain-of-thought (CoT) reasoning process into post-training, large language models (LLMs) demonstrate a "slow-thinking" mode, where they reason thoroughly before generating responses [45, 55]. Inspired by this, we attempt to introduce "slow thinking" into embodied video-based spatial reasoning tasks, as shown in Figure 1. + +This brings a new challenge: the trade-off between model size and computational cost. 
Existing studies suggest a strong correlation between multimodal understanding/perception capabilities and model size [7, 20, 56]. Since reasoning builds on perception, larger vision-language foundation models should be used as the starting point for training. However, increasing model size leads to often unacceptable computational costs. Additionally, video inputs map to long token sequences, further raising computational demands. Is there a way to leverage the perception capabilities of large-scale models while developing embodied reasoning abilities at a lower computational cost? + +Inspired by neuroscience [64], spatial perception and reasoning involve distinct brain regions: visual perception occurs in the visual areas of the occipital lobe [13], basic spatial understanding in the parietal lobe [18], and complex spatial reasoning in the prefrontal + +cortex [14]. This inspired the design of a collaborative framework with two main components: a large-scale vision-language model (VLM) for perception and a small-scale language model (LM) for reasoning. Based on the continuity of observations, we first propose a key-frame extractor to retain critical information while reducing computational costs. Using a VLM, we sequentially extract semantic information from the frames, which simulates real-world online reasoning while effectively managing the input token length of VLMs for long video inputs. Finally, the semantic information and reasoning question are fed into the small-scale language model, which outputs the reasoning process and final answers. The small-scale language model is trained with RL, where the reward modeling not only incorporates rule-based rewards inspired by Deepseek-R1-Zero [24] but, more importantly, introduces a novel reward for the logical consistency of the reasoning process. In the experiments, we explore seven research questions, covering the framework's performance, RL's role in activating embodied spatial reasoning, and out-of-distribution generalization capabilities. + +In general, the main contributions of this paper are as follows: + +- We propose a collaborative framework for large-scale and small-scale foundation models to address spatial reasoning in the video modality. By decoupling perception and reasoning, the framework leverages the perceptual strength of large-scale foundation models while efficiently enhancing the reasoning capabilities of smaller models in a computationally resource-friendly manner. +- This is the first work to employ reinforcement learning (RL) to enhance the embodied spatial reasoning abilities of foundation models. Specifically, we introduce a novel logical consistency reward, which improves the alignment between reasoning processes and generated answers. +- Our proposed Embodied-R achieves performance comparable to state-of-the-art multimodal large language models (e.g., OpenAI-o1/Gemini-2.5-Pro) on both in-distribution and out-of-distribution benchmarks. We further investigate research questions including the generalization comparison between models trained by SFT & RL, reward design strategies, etc. + +# 2 Related Work + +Large Language Model Reasoning. Recently, enhancing reasoning capabilities has become a key focus in large model technologies, demonstrating remarkable performance on tasks such as mathematical and logical problem-solving [25, 47, 57]. 
Following the release of OpenAI's o1 [38], numerous studies have proposed various technical approaches to achieve similar functionalities, including Chain-of-Thought (CoT) [54], Monte Carlo Tree Search (MCTS) [23, 60], distillation [35], rejection sampling combined with supervised fine-tuning (SFT) or Direct Preference Optimization (DPO) [40], among others. Furthermore, DeepSeek-R1 [24] introduced a method to foster the emergence of reasoning abilities in large language models (LLMs) through rule-based rewards combined with reinforcement learning. Similarly, Kimi k1.5 [45] proposed a comparable approach, presenting various training techniques, such as curriculum learning. This reinforcement learning paradigm has sparked significant interest, with subsequent works successfully reproducing related results [55, 59].

Embodied Spatial Reasoning with VLMs. Inspired by the generality of foundation models across various domains [2, 3], embodied intelligence aims to develop agents that utilize large multimodal models as their "brains" to achieve perception, navigation, and manipulation in the 3D physical world [15, 41]. In terms of input, human visual-spatial perception is more akin to continuous RGB observations, similar to video streams [12, 42], rather than static images [48] or point clouds [52]. Several embodied video benchmarks [58] demonstrate that, while perception tasks are relatively well-addressed, spatial reasoning tasks—such as spatial relationship inference, navigation, and planning—remain highly challenging. However, existing research [16, 43] on video reasoning primarily focuses on disembodied content reasoning, with little emphasis on scenarios involving embodied continuous visual inputs.

Collaboration between large and small models. Existing research primarily focuses on addressing the resource consumption and privacy risks associated with large models, as well as the efficiency and performance advantages of small models in specific scenarios [50]. Small models can assist large models in data selection, prompt optimization, and reasoning enhancement [28, 61]. The use of small models to detect hallucinations and privacy leakage is explored in [49, 63], improving overall system reliability. While our work shares the goal of reducing computational resource demands, it differs by emphasizing the complementary roles of large-scale VLMs in perception and small-scale LMs in enhancing embodied spatial reasoning.

# 3 The Embodied-R Method

We first define the problem of embodied spatial reasoning. Subsequently, we introduce the VLM-based perception module and the LM-based reasoning module. The collaborative framework is shown in Figure 2.

# 3.1 Problem Formulation

In the physical world, an agent moves through space, generating a sequence of video frames (continuous visual observations) $\mathbf{f} = [f_0, f_1, \dots, f_T]$ . Suppose a spatial reasoning problem is denoted as $q$ . Our goal is to build a model that takes $q$ and $\mathbf{f}$ as inputs and outputs an answer $a$ . The answer $a$ is considered correct if it is semantically consistent with the ground truth $g$ ; otherwise, it is deemed incorrect.

# 3.2 Large-Scale VLM-based Perception

3.2.1 Key-Frame Extractor. As the agent moves continuously in space, high sampling frequencies result in significant overlap between consecutive frames. On one hand, the VLM relies on changes in the static objects within the environment across frames to infer the agent's pose variation.
On the other hand, excessive overlap between frames increases inference costs for both the VLM and the LLM. To address this, we designed a key-frame extractor tailored to the characteristics of embodied videos, selecting key frames that retain overlap while ensuring sufficient information gain between them.

The extraction of key-frames is based on the overlap of visual fields caused by motion continuity. When the agent moves forward, the visual content in the latter frame is expected to overlap with a portion of the former frame, and the reverse is true when moving backward. Similarly, during left or right rotations, the latter frame should partially overlap with the former frame in the horizontal direction, and during upward or downward rotations, the overlap occurs in the vertical direction. Given that the sampling frequency of visual observations is typically much higher than the agent's motion speed, frames generally exhibit significant overlap.

Specifically, a perspective transformation is used to model the geometric relationship between frames. Assuming $f_{t}$ is a key-frame, to determine whether $f_{t+1}$ should also be considered a key-frame, keypoints and descriptors are calculated from $f_{t}$ and $f_{t+1}$ using the Oriented FAST and Rotated BRIEF (ORB) algorithm. Next, a feature matching algorithm, such as the Brute-Force Matcher, is applied to match the descriptors between the two frames, and the Random Sample Consensus (RANSAC) algorithm is employed to estimate the homography matrix. The overlap ratio between the two frames is then computed. If the overlap ratio is less than a predefined threshold, it indicates significant visual changes between the frames, and $f_{t+1}$ is marked as a key-frame. Otherwise, the algorithm proceeds to calculate the overlap ratio between $f_{t}$ and $f_{t+2}$. This process continues until a new key-frame is identified, which then becomes the reference for subsequent frames. Considering the effect of viewpoint changes, rotations (both horizontal and vertical) result in larger field-of-view variations, leading to more frames being recorded during these movements. If the indices of the extracted keyframes are denoted as $\mathbf{f}' = [f_{k_0}, f_{k_1}, \dots, f_{k_n}]$, the keyframe extraction process can be summarized as:

$$
\mathbf{f}' = \operatorname{K-Extractor}(\mathbf{f}). \tag{1}
$$

3.2.2 Embodied Semantic Representation. Since perceptual capability is positively correlated with model size [27, 58, 62], we employ a large-scale VLM to process visual inputs to ensure high-quality perception. The differential information of each key frame is described sequentially. This approach provides two key benefits: 1) The sequential and dynamic processing aligns better with the characteristics of embodied scenarios, where visual observations are continuously generated over time. At each moment, the model should integrate historical semantic representations with the latest visual observations, rapidly updating the semantic understanding of spatial perception. 2) It facilitates the handling of long videos by avoiding the input token limitations that arise when all frames are processed simultaneously by the VLM.

Specifically, for the first frame, the VLM identifies the objects present in the scene, their attributes, and their spatial locations.
For subsequent frames, both the previous frame and the current frame are input into the VLM to extract the key semantic representation $s_{k_j}$:

$$
s_{k_j} \sim \psi_{\theta}\left(s \mid f_{k_{j-1}}, f_{k_j}; q\right), \quad j = 1, 2, \dots, n, \tag{2}
$$

where $s_{k_j}$ consists of three items:

- Action: Inferring the agent's actions based on the changes in visual observations between consecutive frames.
- $\Delta$ Information: Determining changes in the spatial relationships between the agent and known objects, as well as identifying whether new objects appear in the field of view.
- $q$-related content: Detecting whether objects or information relevant to the reasoning task appear in the latest field of view.

![](images/b4adaf2d4183da6749bf4525fe2d5c946fa39eb4214e564e23bd8a915e471952.jpg)
Figure 2: The proposed Embodied-R is a collaborative embodied spatial reasoning framework integrating a Vision-Language Model (VLM) and a Language Model (LM). The separation of perception and reasoning enables us to leverage the perceptual capabilities of large-scale VLMs while training a resource-efficient small-scale LM to activate embodied reasoning through RL. Notably, we introduce a novel logical consistency reward to guide the LM in producing logically coherent reasoning and answers.

In this way, we can extract spatial semantic representations $\mathbf{s} = [s_{k_0}, s_{k_1}, \dots, s_{k_n}]$ from the keyframes $\mathbf{f}'$.
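To make the sequential formulation in Eq. (2) concrete, the following minimal Python sketch shows one way the per-keyframe extraction loop could be organized. Here `vlm_describe` is a hypothetical helper standing in for a call to the large-scale VLM; its name and the prompt/response format are assumptions for illustration, not the paper's released code.

```python
# A minimal sketch of the sequential semantic-extraction loop in Eq. (2).
# `vlm_describe` is a hypothetical stand-in for a large-scale VLM call.
from typing import Any, List

def vlm_describe(prev_frame: Any, cur_frame: Any, question: str) -> str:
    """Return the action, delta-information, and question-related content
    observable between two consecutive key-frames (VLM call goes here)."""
    raise NotImplementedError

def extract_semantics(keyframes: List[Any], question: str) -> List[str]:
    # First key-frame: objects, attributes, and spatial locations.
    semantics = [vlm_describe(None, keyframes[0], question)]
    # Later key-frames: condition only on the previous key-frame, so the
    # VLM input stays bounded no matter how long the video is.
    for prev, cur in zip(keyframes, keyframes[1:]):
        semantics.append(vlm_describe(prev, cur, question))
    return semantics
```

The design point is that each VLM call sees at most two frames plus the question, which is what keeps the approach viable for arbitrarily long videos.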
# 3.3 Small-Scale LM-based Reasoning

Given semantic perception, we can train a small-scale, training-friendly language model capable of performing embodied spatial reasoning. Assuming the small-scale LM is denoted as $\pi_{\theta}$, the response $o$ inferred from the model can be expressed as $o \sim \pi_{\theta}(o \mid q, \mathbf{s})$.

Our training objective is to ensure that the model adheres to the "think-then-answer" paradigm, where the thinking process is logical and the answer is correct. We follow DeepSeek-R1-Zero and adopt a computationally efficient RL training strategy, Group Relative Policy Optimization (GRPO). Besides rule-based format and accuracy rewards, we propose a novel reasoning process reward tailored for embodied reasoning tasks to mitigate reward hacking and enhance the logical consistency between the reasoning process and the final answer.

3.3.1 Group Relative Policy Optimization. For a given query $q$ and semantic annotation $\mathbf{s}$, GRPO generates a group of outputs $\{o_1, o_2, \dots, o_G\}$ using the reference policy $\pi_{\mathrm{ref}}$. The reference policy typically refers to the original model not trained via GRPO. The policy model $\pi_\theta$ is then updated by optimizing the following objective:

$$
\mathcal{J}(\theta) = \mathbb{E}_{(q,\mathbf{s}) \sim \mathbb{D},\, \{o_i\}_{i=1}^{G} \sim \pi_{\mathrm{old}}(o \mid q, \mathbf{s})} \left[ \frac{1}{G} \sum_{i=1}^{G} \left( \min\!\left( \frac{\pi_{\theta}(o_i \mid q, \mathbf{s})}{\pi_{\mathrm{old}}(o_i \mid q, \mathbf{s})} A_i,\ \operatorname{clip}\!\left( \frac{\pi_{\theta}(o_i \mid q, \mathbf{s})}{\pi_{\mathrm{old}}(o_i \mid q, \mathbf{s})}, 1-\epsilon, 1+\epsilon \right) A_i \right) - \beta\, \mathcal{D}_{\mathrm{KL}}\big(\pi_{\theta} \,\|\, \pi_{\mathrm{ref}}\big) \right) \right], \tag{3}
$$

where $\epsilon$ and $\beta$ are hyperparameters, and $\mathcal{D}_{\mathrm{KL}}(\pi_{\theta} \| \pi_{\mathrm{ref}})$ is the KL divergence penalty, estimated as

$$
\mathcal{D}_{\mathrm{KL}}(\pi_{\theta} \,\|\, \pi_{\mathrm{ref}}) = \frac{\pi_{\mathrm{ref}}(o_i \mid q, \mathbf{s})}{\pi_{\theta}(o_i \mid q, \mathbf{s})} - \log \frac{\pi_{\mathrm{ref}}(o_i \mid q, \mathbf{s})}{\pi_{\theta}(o_i \mid q, \mathbf{s})} - 1.
$$

$A_{i}$ represents the advantage corresponding to the output $o_i$, calculated from the group rewards $\{r_1, r_2, \dots, r_G\}$:

$$
A_i = \frac{r_i - \operatorname{mean}(\{r_1, r_2, \dots, r_G\})}{\operatorname{std}(\{r_1, r_2, \dots, r_G\})}.
$$
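A small numerical sketch of Eq. (3) follows. It computes the group-normalized advantages and the clipped surrogate for one group of $G$ responses; per-token aggregation, masking, and gradient flow are omitted, and the clip range `eps=0.2` is an assumption (the paper does not report $\epsilon$), while `beta=0.001` matches the KL coefficient in Table 4.

```python
# Schematic NumPy illustration of Eq. (3) for one group of G responses.
import numpy as np

def grpo_loss(rewards, logp_new, logp_old, logp_ref, eps=0.2, beta=0.001):
    """Scalar GRPO loss; logp_* are per-response sequence log-probs."""
    r, lp, lo, lr = (np.asarray(x, dtype=np.float64)
                     for x in (rewards, logp_new, logp_old, logp_ref))
    adv = (r - r.mean()) / (r.std() + 1e-8)        # group-relative advantage
    ratio = np.exp(lp - lo)                        # pi_theta / pi_old
    clipped = np.clip(ratio, 1.0 - eps, 1.0 + eps)
    surrogate = np.minimum(ratio * adv, clipped * adv)
    # Unbiased estimator of KL(pi_theta || pi_ref), as in the display above.
    t = lr - lp                                    # log(pi_ref / pi_theta)
    kl = np.exp(t) - t - 1.0
    return float(-(surrogate - beta * kl).mean())  # minimize => maximize J
```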
3.3.2 Reward Modeling. Reward modeling is a critical component of RL algorithms, as its design guides the direction of model optimization. We propose three types of rewards: format reward, accuracy reward, and logical consistency reward. These respectively guide the model to learn the "think-answer" reasoning pattern, accurate embodied spatial reasoning, and logical consistency between reasoning and the answer.

Format Reward: We aim for the model to output $o_i$ by first producing an embodied reasoning process $p_i$ followed by the final answer $a_i$. The reasoning process and answer are enclosed within `<think></think>` and `<answer></answer>` tags, respectively:

Please assume the role of an agent. Given a question and a series of frames, you should first think about the reasoning process in the mind and then provide the final answer. The reasoning process and answer are enclosed within `<think></think>` and `<answer></answer>` tags, respectively, i.e., `<think>` reasoning process here `</think>` `<answer>` answer here `</answer>`. Ensure that your answer is consistent with and directly derived from your thinking process, maintaining logical coherence between the two sections. The frames represent your egocentric observations from the past to the present. Question: q. Video: f'. Assistant:

A regular expression is applied to evaluate whether $o_i$ meets the specified requirements, thereby generating the format reward $r_i'$:

$$
r_i' = \begin{cases} 1, & \text{if the format is correct;} \\ 0, & \text{if the format is incorrect.} \end{cases} \tag{4}
$$

Accuracy Reward: The accuracy reward $r_i''$ assesses whether the answer $a_i$ is semantically consistent with the ground truth $g$. For example, multiple-choice questions typically have precise and unique answers, which can be easily extracted when the response adheres to the specified format.

$$
r_i'' = \begin{cases} 1, & a_i = g; \\ 0, & a_i \neq g. \end{cases} \tag{5}
$$

Logical Consistency Reward: When using only the format reward and accuracy reward, we consistently observed hacking behaviors. Specifically, for spatial reasoning tasks where the possible answers are limited (e.g., the relative position of an object with respect to the agent's body), cases arise where an incorrect reasoning process $p_i$ leads to a correct answer $a_i$, which is mistakenly assigned a positive reward. As such cases accumulate, the logical consistency of the model's responses deteriorates. To address this issue, we introduce a simple yet effective process reward. Our goal is to ensure a lower bound on logical consistency, such that the reasoning ability of $\pi_{\theta}$ should not degrade below that of the reference model $\pi_{\mathrm{ref}}$. Therefore, when the model's answer is correct $(a_i = g)$, we input the question $q$ and reasoning process $p_i$ into the reference model without providing video frames, yielding an answer:

$$
a_i' \sim \pi_{\mathrm{ref}}(a \mid q, p_i). \tag{6}
$$

If $a_i'$ is consistent with $a_i$, it indicates that the reasoning process can logically lead to the answer; otherwise, it reflects a logical inconsistency between the reasoning process and the answer.

$$
r_i''' = \begin{cases} 1, & a_i = a_i' = g; \\ 0, & \text{else.} \end{cases} \tag{7}
$$

Total Reward: The total reward is a linear combination of the three rewards mentioned above:

$$
r_i = \omega_1 r_i' + \omega_2 r_i'' + \omega_3 r_i'''. \tag{8}
$$
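A compact sketch of how Eqs. (4)-(8) could be composed is shown below. The regular expression, the `ref_model_answer` helper (standing in for sampling $\pi_{\mathrm{ref}}$ in Eq. (6)), and the default weights (the Stage-3 ratio from Section 4.1.3) are illustrative assumptions rather than the paper's implementation.

```python
# Sketch of the three rewards in Eqs. (4)-(8); helpers are hypothetical.
import re

THINK_ANSWER = re.compile(
    r"^<think>(.+?)</think>\s*<answer>(.+?)</answer>\s*$", re.DOTALL)

def total_reward(output: str, question: str, ground_truth: str,
                 ref_model_answer, w=(1.0, 7.0, 2.0)) -> float:
    m = THINK_ANSWER.match(output.strip())
    r_format = 1.0 if m else 0.0                       # Eq. (4)
    if not m:
        return w[0] * r_format
    think, answer = m.group(1).strip(), m.group(2).strip()
    r_acc = 1.0 if answer == ground_truth else 0.0     # Eq. (5)
    r_logic = 0.0
    if r_acc == 1.0:
        # Re-derive the answer from the reasoning alone, without video
        # frames (Eq. 6); reward only if it matches (Eq. 7).
        r_logic = 1.0 if ref_model_answer(question, think) == answer else 0.0
    return w[0] * r_format + w[1] * r_acc + w[2] * r_logic   # Eq. (8)
```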
# 4 Experiments

We first provide the details of the experimental setup and then present quantitative results, qualitative results, and ablation studies. These correspond to the following three research questions (RQs):

- RQ1: How does Embodied-R perform compared to existing video-LLMs?
- RQ2: Has Embodied-R learned slow-thinking?
- RQ3: What are the contributions of each module?

# 4.1 Experimental Setup

4.1.1 Data Preparation.
We primarily focus on spatial reasoning problems during motion within three-dimensional physical space to evaluate the effectiveness of our method. For this purpose, we selected two embodied video datasets as the main training and testing sets: VSI-Bench [58], which contains indoor first-person navigation data, and UrbanVideo-Bench [62], which consists of outdoor embodied data captured by drones navigating through aerial spaces. These datasets provide diversity in scenarios by incorporating both outdoor and indoor video data. Based on the content of the tasks, we specifically selected four distinct types of tasks from each dataset, characterized by long spatial reasoning chains and low accuracy. These tasks are formulated as multiple-choice question-answering problems, ensuring deterministic answers that facilitate RL training and allow accuracy to be computed directly for evaluation. Across the eight task categories, the dataset covers multiple levels of spatial reasoning, comprising a total of 5,415 QA pairs and 1,492 videos. Additionally, we include two out-of-distribution datasets: EgoSchema [34] and the egocentric task in MVBench [27]. EgoSchema is designed for task-level reasoning from a first-person perspective, with 500 QA pairs and 500 videos available in its fully open-source portion. MVBench encompasses the embodied task of egocentric navigation, comprising 200 QA pairs and 200 corresponding videos. These datasets serve to evaluate the generalization capability of the trained model.

To ensure comprehensive evaluation, we conducted five repeated experiments: the dataset was randomly divided into five equal parts, and 5-fold cross-validation was adopted. The final testing results are averaged across the five experiments. Furthermore, we address the issue of potential semantic bias in the datasets. For instance, in action generation tasks, forward movement may inherently have a higher correctness rate than adjusting the gimbal angle, which is a characteristic of the task itself. To prevent the testing performance from being influenced by the model learning the textual distribution rather than truly understanding the spatial information in the video, we implement an additional filtering step for the testing set. Specifically, we train an LLM through supervised fine-tuning using only the textual QA pairs from the training set, without video inputs. If a question in the testing set can be correctly answered by the fine-tuned LLM but not by the original LLM, it indicates semantic bias in that QA pair. These biased QA pairs are excluded from the testing set, as they fail to accurately assess the spatial reasoning capabilities of models.

4.1.2 Implementation Details. We use Qwen2.5-3B-Instruct [57] as the small-scale LM and Qwen2.5-VL-72B-Instruct [6] as the large-scale VLM. Both training and inference were conducted on 8 NVIDIA A800-SXM4-40GB GPUs, with each RL training run requiring approximately 90 GPU hours. Other key hyperparameters for training are as follows: learning rate: 5e-7; temperature: 1.0; train batch size: 32; rollout size: 8; KL coefficient: 0.001; maximum response length: 2048; input length: 6144. When conducting inference on the test set, the temperature is set to 0.5.

Table 1: Accuracy of Embodied-R and baselines on 8 indoor and outdoor embodied spatial reasoning tasks. The baselines include popular proprietary models, state-of-the-art (SOTA) multimodal reasoning models, open-source video large language models, and models fine-tuned on the same training dataset.
*The first four task columns belong to UrbanVideo-Bench; the last four belong to VSI-Bench.*

| Method | Avg. | Landmark Position | Counterfactual | Progress Evaluation | Action Generation | Relative Distance | Relative Direction | Route Planning | Appearance Order |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Random | 24.0 | 19.7 | 25.0 | 21.8 | 16.4 | 25.0 | 36.1 | 28.3 | 25.0 |
| **Proprietary Models (API)** |  |  |  |  |  |  |  |  |  |
| Qwen-VL-Max [32f] | 34.1 | 44.8 | 49.2 | 38.8 | 29.6 | 28.0 | 33.3 | 29.6 | 28.3 |
| GPT-4o [32f] | 35.7 | 36.8 | 44.7 | 34.2 | 33.8 | 37.0 | 41.3 | 31.5 | 28.5 |
| Gemini-1.5-Flash [1fps] | 38.3 | 37.8 | 42.4 | 43.3 | 34.4 | 37.7 | 41.0 | 31.5 | 37.8 |
| Gemini-1.5-Pro [1fps] | 39.7 | 37.4 | 46.2 | 38.8 | 31.9 | 51.3 | 46.3 | 36.0 | 34.6 |
| **SOTA Reasoning Models (API)** |  |  |  |  |  |  |  |  |  |
| OpenAI-o1 [32f] | 37.2 | 34.6 | 53.3 | 39.1 | 28.0 | 39.7 | 35.8 | 52.9 | 39.8 |
| Gemini-2.5-Pro [1fps] | 40.8 | 40.0 | 75.0 | 38.7 | 23.5 | 42.0 | 34.5 | 52.4 | 63.6 |
| **Open-source Models** |  |  |  |  |  |  |  |  |  |
| LLaVA-NeXT-Video-7B-hf [32f] | 29.5 | 49.5 | 20.5 | 36.6 | 19.2 | 25.2 | 26.3 | 29.9 | 24.5 |
| Phi-3.5-vision-instruct [32f] | 29.0 | 49.2 | 34.8 | 33.2 | 15.6 | 25.4 | 26.5 | 36.9 | 25.2 |
| Kangaroo [64f] | 30.0 | 35.5 | 42.4 | 32.5 | 32.4 | 25.2 | 26.8 | 23.5 | 24.9 |
| InternVL2-2B [32f] | 24.5 | 19.3 | 45.5 | 29.2 | 20.9 | 25.1 | 25.0 | 32.6 | 23.9 |
| InternVL2-8B [32f] | 25.5 | 23.1 | 45.5 | 31.5 | 21.4 | 24.7 | 25.7 | 28.3 | 24.8 |
| InternVL2-40B [32f] | 25.8 | 23.2 | 41.7 | 32.4 | 22.3 | 24.9 | 25.7 | 29.4 | 24.5 |
| Qwen2.5-VL-3B-Instruct [1fps] | 33.1 | 32.1 | 47.8 | 34.0 | 31.0 | 27.9 | 32.6 | 39.0 | 38.9 |
| Qwen2.5-VL-7B-Instruct [1fps] | 33.3 | 33.3 | 21.7 | 25.0 | 27.8 | 35.8 | 39.7 | 48.8 | 38.8 |
| Qwen2.5-VL-72B-Instruct [1fps] | 34.9 | 34.7 | 34.8 | 26.4 | 37.7 | 40.8 | 29.0 | 32.5 | 43.9 |
| **Supervised Fine-Tuning** |  |  |  |  |  |  |  |  |  |
| Qwen2.5-VL-3B-Instruct [1fps] | 41.7 | 47.7 | 33.4 | 34.8 | 39.2 | 42.6 | 42.3 | 41.2 | 43.9 |
| Qwen2.5-VL-7B-Instruct [1fps] | 45.4 | 40.2 | 53.4 | 38.0 | 40.8 | 47.8 | 46.3 | 44.1 | 56.1 |
| **Proposed Embodied-R** |  |  |  |  |  |  |  |  |  |
| VLM-72B + LLM-3B [≤32f] | 51.1 | 55.1 | 59.9 | 39.7 | 47.6 | 50.0 | 44.3 | 36.8 | 72.0 |

![](images/9c8f658d66e081cf723488b6f76f91ee89abd848c964038024d4a57588ef7cd3.jpg)
*(Legend of the adjacent chart: GPT-4o, OpenAI-o1, Gemini-1.5-Pro, InternVL2-40B, Qwen2.5-VL-72B, Qwen2.5-VL-3B, Qwen2.5-VL-3B-SFT, Embodied-R.)*

Table 2: Ablation of Key-Frame Extractor.
|  | Avg. Frame | Acc. | Training Time | Inference Time |
| --- | --- | --- | --- | --- |
| w/o | 32 | 51.1 | 127.87 h | 243.68 s |
| w | 20.7 (↓11.3) | 49.5 (↓1.6) | 111.70 h (↓16.17) | 157.55 s (↓86.13) |

Table 3: Ablation of Collaboration.
|  | Avg. | LP | C | PE | AG | RDist | RDir | RP | AO |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| w/o | 34.8 | 31.8 | 45.7 | 28.3 | 28.1 | 41.0 | 29.7 | 37.5 | 46.0 |
| w | 51.1 | 55.1 | 59.9 | 39.7 | 47.6 | 50.0 | 44.3 | 36.8 | 72.0 |
| Δ | +16.3 | +23.3 | +14.2 | +11.4 | +19.5 | +9.0 | +14.6 | −0.7 | +26.0 |

*LP: Landmark Position; C: Counterfactual; PE: Progress Evaluation; AG: Action Generation; RDist: Relative Distance; RDir: Relative Direction; RP: Route Planning; AO: Appearance Order.*

4.1.3 Three-Stage Training Schedule. For the RL training of the LM, we design a three-stage training schedule to achieve a smooth improvement in training performance. The primary distinction between stages lies in the different weight ratios assigned to the three types of rewards (a minimal sketch of the schedule follows the list below).

- Stage 1: In epochs 1 and 2, the goal is to guide the model to follow the `<think></think><answer></answer>` output format. At this stage, the weights are set as $\omega_{1}:\omega_{2}:\omega_{3} = 7{:}3{:}0$. A correct format also makes the answer easy to locate, reducing misjudgments when computing the accuracy reward. During this phase, the format reward rapidly converges to 1.
- Stage 2: In epochs 3 and 4, the focus shifts to improving the accuracy of the model's responses, guiding the model to produce correct reasoning answers. The weights are set as $\omega_{1}:\omega_{2}:\omega_{3} = 3{:}7{:}0$.
- Stage 3: In epochs 5-12, the aim is to enhance accuracy while simultaneously improving the quality of the "thinking" process, ensuring logical consistency between thinking and the answer. The weights are set as $\omega_{1}:\omega_{2}:\omega_{3} = 1{:}7{:}2$.
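As referenced above, the staged weighting reduces to a simple lookup. The epoch boundaries below follow Section 4.1.3; the function name and structure are illustrative assumptions.

```python
# Reward-weight schedule from Section 4.1.3 (function name is illustrative).
def reward_weights(epoch: int) -> tuple[float, float, float]:
    """Return (w1, w2, w3) for the format, accuracy, and logical
    consistency rewards at a given 1-indexed training epoch."""
    if epoch <= 2:
        return (7.0, 3.0, 0.0)   # Stage 1: lock in the think/answer format
    if epoch <= 4:
        return (3.0, 7.0, 0.0)   # Stage 2: push answer accuracy
    return (1.0, 7.0, 2.0)       # Stage 3: add logical consistency
```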
# 4.2 How Does Embodied-R Perform Compared to Existing Video-LLMs?

To evaluate the effectiveness of the proposed method, in addition to the random baseline, we introduced four categories comprising 17 multimodal large language models capable of processing video inputs:

- Proprietary Models: Cost-effective multimodal models with over 100B parameters, including Qwen-VL-Max [46], GPT-4o [37], Gemini-1.5-Flash [44], and Gemini-1.5-Pro [44].
- SOTA Reasoning Models: State-of-the-art reasoning models with the highest performance but significant computational cost, including OpenAI-o1 [38] and Gemini-2.5-Pro [21].
- Open-Source Models: Popular open-source multimodal models, including LLaVA-NeXT-Video-7B-hf [29], Phi-3.5-vision-instruct [1], the InternVL2 series [11], and the Qwen-VL series [6].
- Supervised Fine-Tuning (SFT): Considering the scarcity of embodied video tasks, the aforementioned models may lack exposure to relevant data. Therefore, Qwen2.5-VL-3B-Instruct [6] and Qwen2.5-VL-7B-Instruct [6] are fine-tuned for these tasks.

![](images/c318cdb8f4090e0a3adc8de0a1fd78af3544fb0478b79abbbdcf108a3971a0d2.jpg)
Figure 3: Case Analysis: Embodied-R has initially developed the ability for slow-thinking: it can think before answering, effectively distinguish spatial relationships, provide structured and organized responses, and integrate information across multiple frames for embodied scene analysis.

![](images/2b9e5f7a41386ace07624dec3ec03897c00121bef7fd942a2b707bf0ce1c5755.jpg)

![](images/62b0a49ba975743ab2da724498d96bedd02c9a57a3ea5856833650e9fef3dfdb.jpg)
Figure 4: Ablation of RL training and comparison to other language models.

The results presented in Table 1 lead to the following conclusions:

- After undergoing RL training on embodied reasoning tasks, our model significantly outperformed proprietary models as well as OpenAI-o1 and Gemini-2.5-Pro, by over $10\%$. Moreover, it consistently demonstrated leading performance across various tasks. These results highlight the considerable difficulty of embodied reasoning tasks and indicate that current reasoning models lack generalization capability for such spatial reasoning challenges. On the other hand, the findings confirm that a collaborative framework with RL can effectively enhance model reasoning performance in specific domains, especially for tasks that remain poorly solved.
- For embodied video reasoning, a highly coupled perception-reasoning problem, the VLM Qwen2.5-VL-72B-Instruct achieved an accuracy of only $34.9\%$ through direct inference. In contrast, incorporating a small-scale LM improved accuracy to $51.1\%$. Given limited computational resources for training, the collaborative framework proposed in this study provides an effective solution for balancing model size with hardware constraints.
- Under similar computational resource limitations, direct fine-tuning is restricted to models of 7B parameters or smaller. However, the perceptual capacity of small-scale VL models imposes a low upper bound on accuracy compared to Embodied-R. Additionally, fine-tuned models lack the capability for slow-thinking.

# 4.3 Has Embodied-R Learned Slow-Thinking?

Beyond the quantitative results, we aim to explore whether the spatial reasoning expressed in Embodied-R's outputs has improved. As illustrated in Figure 3, after RL training, Embodied-R demonstrates the following human-like reasoning behaviors:

- Spatial Relationship Reasoning: Accurately inferring the relative spatial relationship between itself and the surrounding environment.
- Systematic Analysis: Breaking down problems into components, presenting answers with a "part-to-whole" structure, and maintaining clear logical organization.
- Contextual Integration: Integrating semantic information across different frames to perform comprehensive analysis.
- Think-Answer Format: Strictly adhering to a structured process of reasoning before outputting the final answer.

In summary, Embodied-R demonstrates a certain degree of slow-thinking capability in embodied spatial reasoning.

![](images/b72f62ac79b3b4ecd587477fe5880cff8321027484d74e9a4a7c196167a6ee4f.jpg)

![](images/02393c833bbae8e6c12fcf40fb48a9b3a5d87263d5f9abc11d381f176dd35a56.jpg)

![](images/77957a7d881be79b7f92f5a86858a4906ac4cfb5dbbbfe27cc3a19d2ebeec4a2.jpg)

![](images/5a7204fe5ebc0eb56147d5cc28c1831272e0043c1ada6eb035614ccd44df17a2.jpg)

![](images/bcdca8529b76556187765c9b5a87cb0912a7dc6dfab13b6c798397055f198039.jpg)
Figure 5: a-d. The GRPO training process (a: accuracy reward; b: format reward; c: ratio of logical consistency reward to accuracy reward; d: response length on the validation set). e. Comparison of accuracy reward curves for RL training of equivalently sized LM and VLM models. f. Model performance before and after integrating the logical consistency reward. g. Comparison of generalization performance between models trained with RL and SFT.

![](images/a38e1a2a0590e90f4eb270befb537b4680adaf74a94c8e1149fbd2b6a5e1a4c2.jpg)

![](images/5b0471981f3a1f19157135899e7655288f65f2ed892ff00e95c43e9724d27af4.jpg)

# 4.4 Contributions of Each Module

4.4.1 Ablation of Key-Frame Extractor. The role of the key-frame extractor is to reduce training and inference time by retaining essential frames and removing redundant ones while maintaining perceptual quality. As shown in Table 2, with a negligible difference in accuracy, training time is reduced by 16.17 hours (about $12.6\%$), and single-inference time is reduced by approximately one-third.

4.4.2 Ablation of Collaboration. The collaborative framework enables improved reasoning capabilities under limited computational resources for training.
With training-free, large-scale pretrained VLMs, it only requires training a small-scale LM to achieve enhanced reasoning performance. As shown in Table 3, with identical key-frame inputs and the same VLM, Qwen2.5-VL-72B-Instruct, the overall accuracy of collaborative inference is nearly 1.5 times that of the standalone VLM.

4.4.3 Ablation of RL Training. RL is central to the LM training in this paper. Without RL training, directly applying the original LM-3B model for reasoning leads to poor performance, as the LM has limited exposure to embodied spatial reasoning data during pretraining. After RL training, the LM achieves significant improvements, with a $27.9\%$ increase on the UrbanVideo-Bench and a $20.6\%$ increase on the VSI-Bench benchmarks.

Given that the VLM has already transformed visual inputs into textual representations, we introduced four text-based reasoning models (o3-mini [39], DeepSeek-R1 [24], Qwen-Max [46], Qwen2.5-7B-Instruct [6]) as baselines to further assess the importance of reasoning capability in the embodied spatial task. The results demonstrate a clear positive correlation between a model's reasoning ability and its accuracy. The strong performance of Embodied-R may not only stem from its familiarity with the data distribution but also from its synergy with the representations provided by the VLM. Following training, the small-scale LM becomes more attuned to the VLM-generated representations, which translates into enhanced performance on embodied reasoning tasks.

# 5 Further Exploration

Building upon the aforementioned experiments, we further explore four intriguing RQs related to embodied video-based RL training:

- RQ4: What Is the Relationship Between Inference Ability, Aha Moments, and Response Length?
- RQ5: Why Not Directly Perform RL Training on VLMs?
- RQ6: Are Accuracy and Format Rewards All You Need?
- RQ7: RL vs. SFT When Generalizing to Out-of-Distribution (OOD) Embodied Tasks?

# 5.1 Relationship Between Inference Ability, Aha Moments, and Response Length?

The GRPO training process is illustrated in Figure 5a-d, which correspond to the validation set's accuracy reward, format reward, ratio of logical consistency reward to accuracy reward, and response length, respectively. Notably, existing pure-text reproductions [55, 59] of DeepSeek-R1-Zero identify inference ability and the "aha moment" as key indicators of emergent reasoning capabilities. However, such phenomena are rarely observed in other multimodal reasoning tasks, such as image-based reasoning [10, 33]. This leads us to hypothesize that response length is strongly influenced by the nature of the question itself. For instance, mathematical problems often require multi-step calculations, where increased reasoning length tends to correlate positively with reasoning ability. In contrast, for multimodal reasoning tasks like embodied spatial reasoning, the LM training process converges toward an optimal range of text output distributions, and concise reasoning patterns may facilitate embodied spatial reasoning. This highlights the versatility of RL-based post-training, demonstrating its ability to benefit a wide range of reasoning tasks.

# 5.2 Why Not Directly Perform RL on VLMs?

We previously attempted direct RL training on the Qwen2.5-VL-3B-Instruct model. As shown in Figure 5e, under similar training parameters and time, the performance of the VLM was notably inferior to that of the LM.
Upon convergence, the VLM achieved an accuracy of $43.8\%$ on the test set, significantly lower than that of the LM. The limited perceptual capability of the VLM restricts its potential for reasoning improvements. Therefore, under resource-constrained conditions, collaborative inference integrating models of different scales presents a promising solution.

# 5.3 Are Accuracy and Format Rewards All You Need?

According to DeepSeek-R1-Zero, accuracy and format rewards appear sufficient to guide a model toward correct reasoning. However, during training on our problem, we observed instances of reward hacking, where the model optimizes the answer while the reasoning process leading to that answer is inconsistent with the answer itself. We aim to ensure alignment between the model's reasoning process and its answer, both to enhance generalization and to improve the interpretability of the reasoning process. As shown in Figure 5f, we employ GPT-4o to evaluate the proportion of logically consistent outputs on the test set before and after incorporating the logical consistency reward. This proportion increased from $46.01\%$ to $99.43\%$ after the reward was added, demonstrating the value of this approach for embodied spatial multiple-choice reasoning tasks. Moreover, this reward mechanism could potentially be extended to other reasoning tasks prone to accuracy-reward hacking during training.

# 5.4 RL vs. SFT When Generalizing to Out-of-Distribution (OOD) Embodied Tasks?

For small-scale LMs, we aim to explore their generalization performance when trained with SFT instead of RL. To evaluate this, we introduced two OOD datasets: EgoSchema and the egocentric task in MVBench. As discussed in Section 4.1.1, these two OOD datasets differ significantly from the training set in both task content and scene characteristics. The accuracy results are shown in Figure 5g. RL-trained models demonstrate generalization ability across both datasets. On the EgoSchema dataset, the RL-trained language model under the Embodied-R framework even achieves performance comparable to the state-of-the-art multimodal reasoning model Gemini-2.5-Pro. SFT-trained models showed improvement on EgoSchema but a decline on MVBench. This suggests that slow reasoning, as employed in RL-trained models, could be a promising approach to improving generalization even for small-scale models.

# 6 Conclusion

To address embodied spatial reasoning tasks, we propose a collaborative framework that leverages the perceptual capabilities of large-scale VLMs and the reasoning potential of compact LMs. Through 90 hours of RL training on a 3B LM using 8 NVIDIA A800-SXM4-40GB GPUs, Embodied-R surpasses OpenAI-o1 by $13.9\%$ and Gemini-2.5-Pro by $10.3\%$ on the test set. Other key findings include: (1) RL training leads to output length convergence, aligning with the requirements of the task; (2) the reasoning upper bound of same-scale VLMs trained with RL is significantly lower than that of Embodied-R, due to inherent limitations in perception; (3) the proposed logical consistency reward enhances reasoning quality; and (4) models trained via RL exhibit stronger generalization on out-of-distribution datasets compared to those trained with SFT.

# References

[1] Marah Abdin, Jyoti Aneja, Hany Awadalla, Ahmed Awadallah, Ammar Ahmad Awan, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Jianmin Bao, Harkirat Behl, et al. 2024. Phi-3 technical report: A highly capable language model locally on your phone.
arXiv preprint arXiv:2404.14219 (2024).
[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774 (2023).
[3] Michael Ahn, Debidatta Dwibedi, Chelsea Finn, Montse Gonzalez Arenas, Keerthana Gopalakrishnan, Karol Hausman, Brian Ichter, Alex Irpan, Nikhil Joshi, Ryan Julian, et al. 2024. Autort: Embodied foundation models for large scale orchestration of robotic agents. arXiv preprint arXiv:2401.12963 (2024).
[4] Cameron A Aubin, Benjamin Gorissen, Edoardo Milana, Philip R Buskohl, Nathan Lazarus, Geoffrey A Slipher, Christoph Keplinger, Josh Bongard, Fumiya Iida, Jennifer A Lewis, et al. 2022. Towards enduring autonomous robots via embodied energy. Nature 602, 7897 (2022), 393-402.
[5] Daichi Azuma, Taiki Miyanishi, Shuhei Kurita, and Motoaki Kawanabe. 2022. Scanqa: 3d question answering for spatial scene understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 19129-19139.
[6] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. 2025. Qwen2.5-vl technical report. arXiv preprint arXiv:2502.13923 (2025).
[7] Keshigeyan Chandrasegaran, Agrim Gupta, Lea M Hadzic, Taran Kota, Jimming He, Cristóbal Eyzaguirre, Zane Durante, Manling Li, Jiajun Wu, and Fei-Fei Li. 2024. Hourvideo: 1-hour video-language understanding. Advances in Neural Information Processing Systems 37 (2024), 53168-53197.
[8] Bolei Chen, Jiaxu Kang, Ping Zhong, Yixiong Liang, Yu Sheng, and Jianxin Wang. 2024. Embodied Contrastive Learning with Geometric Consistency and Behavioral Awareness for Object Navigation. In Proceedings of the 32nd ACM International Conference on Multimedia. 4776-4785.
[9] Boyuan Chen, Zhuo Xu, Sean Kirmani, Brian Ichter, Dorsa Sadigh, Leonidas Guibas, and Fei Xia. 2024. Spatialvlm: Endowing vision-language models with spatial reasoning capabilities. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 14455-14465.
[10] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. 2025. R1-V: Reinforcing Super Generalization Ability in Vision-Language Models with Less Than $3. https://github.com/Deep-Agent/R1-V. Accessed: 2025-02-02.
[11] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. 2024. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 24185-24198.
[12] Sijie Cheng, Kechen Fang, Yangyang Yu, Sicheng Zhou, Bohao Li, Ye Tian, Tingguang Li, Lei Han, and Yang Liu. 2024. Videgothink: Assessing egocentric video understanding capabilities for embodied ai. arXiv preprint arXiv:2410.11623 (2024).
[13] Stephanie Clarke and Judit Miklossy. 1990. Occipital cortex in man: Organization of callosal connections, related myelo- and cytoarchitecture, and putative boundaries of functional visual areas. Journal of Comparative Neurology 298, 2 (1990), 188-214.
[14] Maël Donoso, Anne GE Collins, and Etienne Koechlin. 2014. Foundations of human reasoning in the prefrontal cortex. Science 344, 6191 (2014), 1481-1486.
[15] Danny Driess, Fei Xia, Mehdi SM Sajjadi, Corey Lynch, Aakanksha Chowdhery, Ayzaan Wahid, Jonathan Tompson, Quan Vuong, Tianhe Yu, Wenlong Huang, et al. 2023.
Palm-e: An embodied multimodal language model. (2023).
[16] Hao Fei, Shengqiong Wu, Wei Ji, Hanwang Zhang, Meishan Zhang, Mong-Li Lee, and Wynne Hsu. 2024. Video-of-thought: Step-by-step video reasoning from perception to cognition. arXiv preprint arXiv:2501.03230 (2024).
[17] Nanyi Fei, Zhiwu Lu, Yizhao Gao, Guoxing Yang, Yuqi Huo, Jingyuan Wen, Haoyu Lu, Ruihua Song, Xin Gao, Tao Xiang, et al. 2022. Towards artificial general intelligence via a multimodal foundation model. Nature Communications 13, 1 (2022), 3094.
[18] Leonardo Fogassi, Pier Francesco Ferrari, Benno Gesierich, Stefano Rozzi, Fabian Chersi, and Giacomo Rizzolatti. 2005. Parietal lobe: from action organization to intention understanding. Science 308, 5722 (2005), 662-667.
[19] Lucia Foglia and Robert A Wilson. 2013. Embodied cognition. Wiley Interdisciplinary Reviews: Cognitive Science 4, 3 (2013), 319-325.
[20] Chen Gao, Baining Zhao, Weichen Zhang, Jinzhu Mao, Jun Zhang, Zhiheng Zheng, Fanhang Man, Jianjie Fang, Zile Zhou, Jinqiang Cui, et al. 2024. EmbodiedCity: A Benchmark Platform for Embodied Agent in Real-world City Environment. arXiv preprint arXiv:2410.09604 (2024).
[21] Google. 2024. Gemini API. https://ai.google.dev/gemini-api. Accessed: 2025-04-12.
[22] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, et al. 2022. Ego4d: Around the world in 3,000 hours of egocentric video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 18995-19012.
[23] Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. 2025. rStar-Math: Small LLMs Can Master Math Reasoning with Self-Evolved Deep Thinking. arXiv preprint arXiv:2501.04519 (2025).
[24] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948 (2025).
[25] Shima Imani, Liang Du, and Harsh Shrivastava. 2023. Mathprompter: Mathematical reasoning using large language models. arXiv preprint arXiv:2303.05398 (2023).
[26] James Intriligator and Patrick Cavanagh. 2001. The spatial resolution of visual attention. Cognitive Psychology 43, 3 (2001), 171-216.
[27] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. 2024. Mvbench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 22195-22206.
[28] Tianlin Li, Qian Liu, Tianyu Pang, Chao Du, Qing Guo, Yang Liu, and Min Lin. 2024. Purifying large language models by assembling a small language model. arXiv preprint arXiv:2402.14845 (2024).
[29] Bin Lin, Yang Ye, Bin Zhu, Jiaxi Cui, Munan Ning, Peng Jin, and Li Yuan. 2023. Video-llava: Learning united visual representation by alignment before projection. arXiv preprint arXiv:2311.10122 (2023).
[30] Fangyu Liu, Guy Emerson, and Nigel Collier. 2023. Visual spatial reasoning. Transactions of the Association for Computational Linguistics 11 (2023), 635-651.
[31] Hongbin Liu, Yongze Zhao, Peng Dong, Xiuyi Guo, and Yilin Wang. 2024. IOFTracker: A Two-Stage Multiple Targets Tracking Method Using Spatial-Temporal Fusion Algorithm. Applied Sciences 15, 1 (2024), 107.
[32] Yang Liu, Weixing Chen, Yongjie Bai, Xiaodan Liang, Guanbin Li, Wen Gao, and Liang Lin. 2024.
Aligning cyber space with physical world: A comprehensive survey on embodied ai. arXiv preprint arXiv:2407.06886 (2024).
[33] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. 2025. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785 (2025).
[34] Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. 2023. Egoschema: A diagnostic benchmark for very long-form video language understanding. Advances in Neural Information Processing Systems 36 (2023), 46212-46244.
[35] Yingqian Min, Zhipeng Chen, Jinhao Jiang, Jie Chen, Jia Deng, Yiwen Hu, Yiru Tang, Jiapeng Wang, Xiaoxue Cheng, Huatong Song, et al. 2024. Imitate, explore, and self-improve: A reproduction report on slow-thinking reasoning systems. arXiv preprint arXiv:2412.09413 (2024).
[36] Yao Mu, Qinglong Zhang, Mengkang Hu, Wenhai Wang, Mingyu Ding, Jun Jin, Bin Wang, Jifeng Dai, Yu Qiao, and Ping Luo. 2023. Embodiedgpt: Vision-language pre-training via embodied chain of thought. Advances in Neural Information Processing Systems 36 (2023), 25081-25094.
[37] OpenAI. 2024. GPT-4o API. https://openai.com/api/. Accessed: 2025-04-12.
[38] OpenAI. 2024. Learning to Reason with LLMs. https://openai.com/index/learning-to-reason-with-llms/. Accessed: 2025-03-04.
[39] OpenAI. 2025. OpenAI o3-mini. https://openai.com/index/openai-o3-mini/. Accessed: 2025-04-15.
[40] Amrith Setlur, Saurabh Garg, Xinyang Geng, Naman Garg, Virginia Smith, and Aviral Kumar. 2025. RL on incorrect synthetic data scales the efficiency of LLM math reasoning by eight-fold. Advances in Neural Information Processing Systems 37 (2025), 43000-43031.
[41] Dhruv Shah, Blazej Osinski, Sergey Levine, et al. 2023. Lm-nav: Robotic navigation with large pre-trained models of language, vision, and action. In Conference on Robot Learning. PMLR, 492-504.
[42] Alessandro Suglia, Claudio Greco, Katie Baker, Jose L Part, Ioannis Papaioannou, Arash Eshghi, Ioannis Konstas, and Oliver Lemon. 2024. Alanavlm: A multimodal embodied ai foundation model for egocentric video understanding. arXiv preprint arXiv:2406.13807 (2024).
[43] Guangzhi Sun, Yudong Yang, Jimin Zhuang, Changli Tang, Yixuan Li, Wei Li, Zejun Ma, and Chao Zhang. 2025. video-SALMONN-o1: Reasoning-enhanced Audio-visual Large Language Model. arXiv preprint arXiv:2502.11775 (2025).
[44] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. 2023. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805 (2023).
[45] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. 2025. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599 (2025).
[46] Qwen Team. 2024. Qwen-VL-Max. https://qwenlm.github.io/blog/qwen-vl-max/. Accessed: 2025-04-12.
[47] Qwen Team. 2024. QwQ: Reflect Deeply on the Boundaries of the Unknown. https://qwenlm.github.io/blog/qwq-32b-preview/.
[48] Omkar Thawakar, Dinura Dissanayake, Ketan More, Ritesh Thawkar, Ahmed Heakl, Noor Ahsan, Yuhao Li, Mohammed Zumri, Jean Lahoud, Rao Muhammad Anwer, et al. 2025. Llamav-o1: Rethinking step-by-step visual reasoning in llms. arXiv preprint arXiv:2501.06186 (2025).
[49] Dennis Ulmer, Martin Gubri, Hwaran Lee, Sangdoo Yun, and Seong Joon Oh. 2024. Calibrating large language models using their generations only. arXiv preprint arXiv:2403.05973 (2024).
[50] Fali Wang, Zhiwei Zhang, Xianren Zhang, Zongyu Wu, Tzuhao Mo, Qiuhao Lu, Wanjing Wang, Rui Li, Junjie Xu, Xianfeng Tang, et al. 2024. A comprehensive survey of small language models in the era of large language models: Techniques, enhancements, applications, collaboration with llms, and trustworthiness. arXiv preprint arXiv:2411.03350 (2024).
[51] Jiayu Wang, Yifei Ming, Zhenmei Shi, Vibhav Vineet, Xin Wang, Sharon Li, and Neel Joshi. 2024. Is a picture worth a thousand words? delving into spatial reasoning for vision language models. Advances in Neural Information Processing Systems 37 (2024), 75392-75421.
[52] Tai Wang, Xiaohan Mao, Chenming Zhu, Runsen Xu, Ruiyuan Lyu, Peisen Li, Xiao Chen, Wenwei Zhang, Kai Chen, Tianfan Xue, et al. 2024. Embodiedscan: A holistic multi-modal 3d perception suite towards embodied ai. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 1975-1976.
[53] Zhecan Wang, Garrett Bingham, Adams Wei Yu, Quoc V Le, Thang Luong, and Golnaz Ghiasi. 2024. Haloquest: A visual hallucination dataset for advancing multimodal reasoning. In European Conference on Computer Vision. Springer, 288-304.
[54] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in Neural Information Processing Systems 35 (2022), 24824-24837.
[55] Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. 2025. Logic-RL: Unleashing LLM Reasoning with Rule-Based Reinforcement Learning. arXiv preprint arXiv:2502.14768 (2025).
[56] Cheng Xu, Xiaofeng Hou, Jiacheng Liu, Chao Li, Tianhao Huang, Xiaozhi Zhu, Mo Niu, Lingyu Sun, Peng Tang, Tongqiao Xu, et al. 2023. Mmbench: Benchmarking end-to-end multi-modal dnns and understanding their hardware-software implications. In 2023 IEEE International Symposium on Workload Characterization (IISWC). IEEE, 154-166.
[57] An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. 2024. Qwen2.5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122 (2024).
[58] Jihan Yang, Shusheng Yang, Anjali W Gupta, Rilyn Han, Li Fei-Fei, and Saining Xie. 2024. Thinking in space: How multimodal large language models see, remember, and recall spaces. arXiv preprint arXiv:2412.14171 (2024).
[59] Weihao Zeng, Yuzhen Huang, Wei Liu, Keqing He, Qian Liu, Zejun Ma, and Junxian He. 2025. 7B Model and 8K Examples: Emerging Reasoning with Reinforcement Learning is Both Effective and Efficient. https://hkust-nlp.notion.site/simplerl-reason. Notion Blog.
[60] Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. 2025. ReST-MCTS*: LLM self-training via process reward guided tree search. Advances in Neural Information Processing Systems 37 (2025), 64735-64772.
[61] Yiming Zhang, Nicholas Carlini, and Daphne Ippolito. 2023. Effective prompt extraction from language models. arXiv preprint arXiv:2307.06865 (2023).
[62] Baining Zhao, Jianjie Fang, Zichao Dai, Ziyou Wang, Jirong Zha, Weichen Zhang, Chen Gao, Yue Wang, Jinqiang Cui, Xinlei Chen, and Yong Li. 2025. UrbanVideo-Bench: Benchmarking Vision-Language Models on Embodied Intelligence with Video Data in Urban Spaces. arXiv:2503.06157 [cs.CV] https://arxiv.org/abs/2503.06157
[63] Theodore Zhao, Mu Wei, J Samuel Preston, and Hoifung Poon. 2023.
Automatic Calibration and Error Correction for Generative Large Language Models via Pareto Optimal Self-Supervision. (2023).
[64] Karl Zilles and Katrin Amunts. 2010. Centenary of Brodmann's map—conception and fate. Nature Reviews Neuroscience 11, 2 (2010), 139-145.

# A Appendix

# A.1 Dataset Introduction

UrbanVideo-Bench: UrbanVideo-Bench is one of the datasets used for training and testing Embodied-R. This benchmark was proposed by Tsinghua University in February 2025. It captures two embodied characteristics of urban environments: complex urban scenes featuring dynamic and static elements, and unique aerial navigation scenarios. The dataset consists of 4 categories and 16 tasks, aimed at evaluating Video-LLMs in terms of recall, perception, reasoning, and navigation capabilities. In our paper, we focus on 4 of these complex tasks for video-based reinforcement learning: Landmark Position, Counterfactual Reasoning, Progress Evaluation, and Action Generation, which represent challenging embodied outdoor tasks.

VSI-Bench: VSI-Bench is the other training and testing dataset for Embodied-R. Proposed by Fei-Fei Li's team at Stanford in December 2024, this benchmark provides high-quality evaluation metrics for assessing the 3D, video-based, visual-spatial intelligence of multimodal large language models (MLLMs). The dataset comprises 2 categories and 8 tasks designed to evaluate key aspects of spatial reasoning. In our paper, we focus on 4 tasks for video-based reinforcement learning: Relative Distance, Relative Direction, Route Planning, and Appearance Order, all of which are categorized as challenging embodied indoor tasks.

EgoSchema: EgoSchema is one of the Out-of-Distribution (OOD) datasets utilized to evaluate the generalization capability of our model. This dataset is specifically designed as a long-form video question-answering benchmark, aimed at assessing modern vision and language systems' ability to understand and reason over extended video content. It provides a rigorous evaluation framework for long video understanding tasks.

MVBench: MVBench is another Out-of-Distribution (OOD) dataset employed to test the generalization capability of our model. MVBench consists of 20 complex video tasks, offering a comprehensive benchmark for evaluating the video understanding capabilities of existing multimodal models. This dataset is designed to address diverse and challenging scenarios in video-based reasoning.

# A.2 Details of Key-Frame Extractor

The goal of key-frame extraction is to ensure sufficient information gain between frames while maintaining a certain degree of overlap. The specific process is as follows:

Step 1: A perspective transformation is used to model the geometric relationship between frames. Assuming $f_{t}$ is a key-frame, to determine whether $f_{t+1}$ should also be considered a key-frame, keypoints and descriptors are calculated from $f_{t}$ and $f_{t+1}$ using the Oriented FAST and Rotated BRIEF (ORB) algorithm:

$$
\text{Keypoints}_{t},\ \text{Descriptors}_{t} = \operatorname{ORB}(f_{t}), \tag{9}
$$

$$
\text{Keypoints}_{t+1},\ \text{Descriptors}_{t+1} = \operatorname{ORB}(f_{t+1}). \tag{10}
$$

Next, a feature matching algorithm, such as the Brute-Force Matcher, is applied to match the descriptors between the two frames, identifying corresponding keypoint pairs $\mathbf{l}_t^{\mathrm{key}}$ and $\mathbf{l}_{t+1}^{\mathrm{key}}$. Using the matched keypoint pairs, the Random Sample Consensus (RANSAC) algorithm is employed to estimate the homography matrix $\mathbf{M}$, which maps the content of $f_{t+1}$ to the coordinate space of $f_t$.

Step 2: The overlap ratio between the two frames is then computed. Assuming the size of each video frame is $w \times h$, for frames $f_{t}$ and $f_{t+1}$: $\mathbf{l}_{t} = \{[0,0], [w,0], [w,h], [0,h]\}$ represents the four corner points of $f_{t}$, and $\mathbf{l}_{t+1} = \{[0,0], [w,0], [w,h], [0,h]\}$ represents the four corner points of $f_{t+1}$. Using the homography matrix $\mathbf{M}$, the corner points $\mathbf{l}_{t+1}$ of $f_{t+1}$ are transformed into the coordinate space of $f_{t}$: $\mathbf{l}_{t+1,i}' = \mathbf{M} \cdot \mathbf{l}_{t+1,i}$, where $\mathbf{l}_{t+1,i} = [x, y, 1]^{T}$ represents a corner point of $f_{t+1}$ in homogeneous coordinates, and $\mathbf{l}_{t+1,i}' = [x', y', w']^{T}$ represents the transformed corner point. The transformed points are normalized to recover 2D coordinates, resulting in a quadrilateral representing $f_{t+1}$ in $f_{t}$'s space. In $f_{t}$'s coordinate space, there are then two polygons: polygon $L_{t}$, defined by the corner points $\mathbf{l}_{t}$ of $f_{t}$, and polygon $L_{t+1}'$, defined by the transformed corner points $\mathbf{l}_{t+1}'$. The overlap ratio $c$ is defined as:

$$
c = \frac{\operatorname{Area}\left(L_{t} \cap L_{t+1}'\right)}{\operatorname{Area}_{\text{total}}}. \tag{11}
$$

If $c$ is less than a predefined threshold $\varepsilon$, it indicates significant visual changes between the frames, and $f_{t+1}$ is marked as a key-frame. Otherwise, the algorithm proceeds to calculate the overlap ratio between $f_t$ and $f_{t+2}$. This process continues until a new key-frame is identified, which then becomes the reference for subsequent frames. Considering the effect of viewpoint changes, rotations (both horizontal and vertical) result in larger field-of-view variations, leading to more frames being recorded during these movements. If the indices of the extracted keyframes are denoted as $\mathbf{f}' = [f_{k_0}, f_{k_1}, \dots, f_{k_n}]$, the keyframe extraction process can be summarized as:

$$
\mathbf{f}' = \operatorname{K-Extractor}(\mathbf{f}). \tag{12}
$$
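A self-contained OpenCV sketch of Steps 1-2 is given below. The threshold value `eps=0.6`, the use of the full frame area $w \times h$ as $\operatorname{Area}_{\text{total}}$, and the assumption that the warped quadrilateral stays convex are all assumptions of this sketch, not values reported by the paper.

```python
# Illustrative OpenCV implementation of the key-frame extractor (Eqs. 9-12).
import cv2
import numpy as np

def overlap_ratio(f_t, f_next) -> float:
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(f_t, None)        # Eq. (9)
    kp2, des2 = orb.detectAndCompute(f_next, None)     # Eq. (10)
    if des1 is None or des2 is None:
        return 0.0
    matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = matcher.match(des1, des2)
    if len(matches) < 4:
        return 0.0  # a homography needs at least 4 correspondences
    src = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    dst = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    M, _ = cv2.findHomography(src, dst, cv2.RANSAC)    # RANSAC estimate of M
    if M is None:
        return 0.0
    h, w = f_t.shape[:2]
    corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]]).reshape(-1, 1, 2)
    warped = cv2.perspectiveTransform(corners, M)      # f_{t+1} in f_t's space
    inter_area, _ = cv2.intersectConvexConvex(warped, corners)
    return inter_area / float(w * h)                   # Eq. (11), Area_total = w*h

def extract_keyframes(frames, eps: float = 0.6):
    """Keep a frame whenever its overlap with the last key-frame drops
    below eps (Eq. 12). eps is a free parameter, not the paper's value."""
    keyframes = [frames[0]]
    for f in frames[1:]:
        if overlap_ratio(keyframes[-1], f) < eps:
            keyframes.append(f)
    return keyframes
```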
# A.3 Details of Data Preparation

A.3.1 Task Selection Criteria. In our study, we carefully selected specific tasks that emphasize spatial reasoning capabilities during motion within three-dimensional physical space. The selection process was guided by several key considerations:

Focus on Reasoning Processes: We prioritized tasks that require deep cognitive processing rather than simple recognition or recall. As highlighted in the main text, embodied spatial reasoning involves complex spatio-temporal relationships where agents must discover object associations across frames and extract task-relevant semantics. For instance, navigation tasks require agents to infer their location from historical observations, construct mental maps, develop high-level plans, and determine specific actions, processes that demand sophisticated reasoning capabilities.
Diversity in Spatial Contexts: To ensure comprehensive evaluation, we selected tasks from both indoor (VSI-Bench) and outdoor (UrbanVideo-Bench) environments, providing diverse spatial contexts that test different aspects of embodied reasoning. This diversity is crucial for evaluating the generalizability of our approach across varying spatial scales and environmental complexities.

Emphasis on Long Reasoning Chains: We specifically targeted tasks characterized by long spatial reasoning chains and historically low accuracy rates. These challenging tasks better demonstrate the value of our "slow thinking" approach, which encourages thorough reasoning before generating responses, similar to how recent advances in mathematical and scientific reasoning have benefited from reinforcement learning techniques.

Table 4: Hyperparameters used in reinforcement learning training of Embodied-R.
| Hyperparameter | Value |
| --- | --- |
| Optimizer | AdamW |
| Learning Rate | 5e-7 |
| Temperature | 1.0 |
| Train Batch Size | 32 |
| Rollout Size | 8 |
| KL Coefficient | 0.001 |
| Maximum Response Length | 2048 |
| Input Length | 6144 |
| Training Epochs | 12 |

Deterministic Evaluation: All selected tasks were formulated as multiple-choice question-answering problems to ensure determinism in answers, facilitating both RL training and direct calculation of accuracy for performance evaluation.

A.3.2 Question Filtering Methodology. To ensure the quality and validity of our dataset, we implemented a rigorous question filtering process:

Blind Testing Filter: We first evaluated questions using an untrained 7B language model without video input (blind selection). Questions that could be correctly answered without visual information were identified as potentially problematic, as they might rely more on textual patterns or common knowledge than on genuine spatial reasoning over video content.

SFT-based Filtering: After conducting supervised fine-tuning (SFT) without video inputs, we analyzed which question types showed significant improvement in accuracy. Categories where the model's performance increased substantially without visual information were flagged for removal, as this indicated strong correlations between question text and answers that could be exploited without actual spatial reasoning.

Correlation Analysis: We specifically eliminated question types where:

- The model could achieve high accuracy without accessing video content
- Performance improved dramatically after text-only SFT training
- Question-answer pairs exhibited strong textual patterns that could be exploited without spatial understanding

This filtering methodology ensured that our final dataset genuinely tests embodied spatial reasoning capabilities rather than linguistic pattern matching or prior-knowledge exploitation. By removing questions with strong text-answer correlations, we created a more challenging and valid benchmark that requires models to truly understand spatial relationships from video content.

# A.4 RL Hyperparameters

The reinforcement learning (RL) training of Embodied-R requires careful hyperparameter tuning to balance computational efficiency with model performance. We conducted extensive experiments to determine the optimal configuration for our collaborative framework. The key hyperparameters used in our RL training process are summarized in Table 4. These settings were selected to ensure stable training while maximizing the model's embodied reasoning capabilities. Notably, we used a relatively small learning rate (5e-7) to prevent catastrophic forgetting and a moderate KL coefficient (0.001) to maintain proximity to the reference model while allowing sufficient exploration.
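For convenience, the Table 4 settings can be collected into a single configuration object. The field names below mirror a generic GRPO trainer and are illustrative rather than a specific library's API; only the values come from Table 4.

```python
# Table 4 settings as one config object (field names are illustrative).
from dataclasses import dataclass

@dataclass
class EmbodiedRTrainConfig:
    optimizer: str = "AdamW"
    learning_rate: float = 5e-7
    temperature: float = 1.0          # sampling temperature during rollouts
    train_batch_size: int = 32
    rollout_size: int = 8             # G responses per query for GRPO
    kl_coefficient: float = 0.001     # beta in Eq. (3)
    max_response_length: int = 2048
    input_length: int = 6144
    training_epochs: int = 12
```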
sha256:61afd7e1166a30a20b01d8eee7dde360998aa57e338c031ceed46f47f20a8449 +size 1111 diff --git a/data/2025/2504_12xxx/2504.12680/images/77957a7d881be79b7f92f5a86858a4906ac4cfb5dbbbfe27cc3a19d2ebeec4a2.jpg b/data/2025/2504_12xxx/2504.12680/images/77957a7d881be79b7f92f5a86858a4906ac4cfb5dbbbfe27cc3a19d2ebeec4a2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..46abaa08b77bde4100b392225848c765ecf12959 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/images/77957a7d881be79b7f92f5a86858a4906ac4cfb5dbbbfe27cc3a19d2ebeec4a2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:342dbd435ffe4dd621c457014993289fdfc2128ccb2431f6eb9e49b855deacc1 +size 13652 diff --git a/data/2025/2504_12xxx/2504.12680/images/7ccac7711d85d0b54aad2b7e8ab68869f72c239a6fc670904f642d5397345cc2.jpg b/data/2025/2504_12xxx/2504.12680/images/7ccac7711d85d0b54aad2b7e8ab68869f72c239a6fc670904f642d5397345cc2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..32d0451bf7553914d46c4318bdb1ba72adf315d0 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/images/7ccac7711d85d0b54aad2b7e8ab68869f72c239a6fc670904f642d5397345cc2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc41d7f8a5a3dfa3be3c8f1f009ef5699fa8876b1f27ec729256cfffba0908d7 +size 25628 diff --git a/data/2025/2504_12xxx/2504.12680/images/8066252818aa0f756bf50e09469d3ee84df1cccc7b9c939aa9775510b37a56f7.jpg b/data/2025/2504_12xxx/2504.12680/images/8066252818aa0f756bf50e09469d3ee84df1cccc7b9c939aa9775510b37a56f7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bf7be09a146ad0c52275b2facfa2ad985066ff93 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/images/8066252818aa0f756bf50e09469d3ee84df1cccc7b9c939aa9775510b37a56f7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8afeb7d785700c968fc3412371869f7db8fad4cf5be513aa614435be9c53f97c +size 7445 diff --git a/data/2025/2504_12xxx/2504.12680/images/8fe50a38bdcaff17c4598a1be54b92b276eda1fc2d6c11ae4b445b89417288e8.jpg b/data/2025/2504_12xxx/2504.12680/images/8fe50a38bdcaff17c4598a1be54b92b276eda1fc2d6c11ae4b445b89417288e8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4b9725a71af4ce92156c1b4084fbfa7255afcaed --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/images/8fe50a38bdcaff17c4598a1be54b92b276eda1fc2d6c11ae4b445b89417288e8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82fd78d26ede0db1f7f9ace5e1f9283a6978893207a8172449ad494875213453 +size 1197 diff --git a/data/2025/2504_12xxx/2504.12680/images/9c8f658d66e081cf723488b6f76f91ee89abd848c964038024d4a57588ef7cd3.jpg b/data/2025/2504_12xxx/2504.12680/images/9c8f658d66e081cf723488b6f76f91ee89abd848c964038024d4a57588ef7cd3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e6819cec50e36e9794695026509ad0a66a3c2eb9 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/images/9c8f658d66e081cf723488b6f76f91ee89abd848c964038024d4a57588ef7cd3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:671c7a68c7746e962c03364b7668da24d5a50776681ccc825b14a35dda5c08d6 +size 30204 diff --git a/data/2025/2504_12xxx/2504.12680/images/a38e1a2a0590e90f4eb270befb537b4680adaf74a94c8e1149fbd2b6a5e1a4c2.jpg b/data/2025/2504_12xxx/2504.12680/images/a38e1a2a0590e90f4eb270befb537b4680adaf74a94c8e1149fbd2b6a5e1a4c2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7cbe4f9afe89f7e7921bc7e649b4b680ce8557cf --- /dev/null +++ 
b/data/2025/2504_12xxx/2504.12680/images/a38e1a2a0590e90f4eb270befb537b4680adaf74a94c8e1149fbd2b6a5e1a4c2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0b6e772e6ce8a02ab1a6d7bad31d6415655aca86acc8f63a9600574b6a2047c8 +size 38056 diff --git a/data/2025/2504_12xxx/2504.12680/images/a7afab221f6d6c3288ef9526fa3ff072372cb905f27e3f3fa7b35a2294a9b74a.jpg b/data/2025/2504_12xxx/2504.12680/images/a7afab221f6d6c3288ef9526fa3ff072372cb905f27e3f3fa7b35a2294a9b74a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..40c1a547a156d0201d539d5bb60dda87c0293e69 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/images/a7afab221f6d6c3288ef9526fa3ff072372cb905f27e3f3fa7b35a2294a9b74a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:186952dc676a719d33a83aa08082b0ce41ddf6a5f90b72ec4e52d305d56bd82b +size 10886 diff --git a/data/2025/2504_12xxx/2504.12680/images/b26be9858af5be7bd451a3520f190c77d197a842af79e537534c1d3cdadee387.jpg b/data/2025/2504_12xxx/2504.12680/images/b26be9858af5be7bd451a3520f190c77d197a842af79e537534c1d3cdadee387.jpg new file mode 100644 index 0000000000000000000000000000000000000000..08c3f857cd1016a708160b956c3fbd27e494f8b4 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/images/b26be9858af5be7bd451a3520f190c77d197a842af79e537534c1d3cdadee387.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fe514afa2159b5f7f026eeca5124245c8758c1e9ac8a9b26c8eede8900309ef +size 10632 diff --git a/data/2025/2504_12xxx/2504.12680/images/b4adaf2d4183da6749bf4525fe2d5c946fa39eb4214e564e23bd8a915e471952.jpg b/data/2025/2504_12xxx/2504.12680/images/b4adaf2d4183da6749bf4525fe2d5c946fa39eb4214e564e23bd8a915e471952.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f810d458eb0b168997ca9af269eaf35aa4b3011a --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/images/b4adaf2d4183da6749bf4525fe2d5c946fa39eb4214e564e23bd8a915e471952.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:206c89c32d98bdee62487a61a74943caa451af957186e19a2640a08e9ad8ed67 +size 172597 diff --git a/data/2025/2504_12xxx/2504.12680/images/b72f62ac79b3b4ecd587477fe5880cff8321027484d74e9a4a7c196167a6ee4f.jpg b/data/2025/2504_12xxx/2504.12680/images/b72f62ac79b3b4ecd587477fe5880cff8321027484d74e9a4a7c196167a6ee4f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c06726ad124d88c3b4664ba84353faae57e30b34 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/images/b72f62ac79b3b4ecd587477fe5880cff8321027484d74e9a4a7c196167a6ee4f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e725d8e57260ec442eebb361d00cbbc330e3525caaedb4c78b4dc03c1f3183b +size 16477 diff --git a/data/2025/2504_12xxx/2504.12680/images/bcdca8529b76556187765c9b5a87cb0912a7dc6dfab13b6c798397055f198039.jpg b/data/2025/2504_12xxx/2504.12680/images/bcdca8529b76556187765c9b5a87cb0912a7dc6dfab13b6c798397055f198039.jpg new file mode 100644 index 0000000000000000000000000000000000000000..965d6aaac8380df3302acfc9f05a1bd04b8a5a4d --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/images/bcdca8529b76556187765c9b5a87cb0912a7dc6dfab13b6c798397055f198039.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6659aa3a0aea50cf93addc1a7b6d41562c9e1c80dc034458e3be8a004c69ef88 +size 19844 diff --git a/data/2025/2504_12xxx/2504.12680/images/bee8e503247e6bfc518e9e846c8a4b872251751a5d9ae2c860f2d80f79ade5f7.jpg 
b/data/2025/2504_12xxx/2504.12680/images/bee8e503247e6bfc518e9e846c8a4b872251751a5d9ae2c860f2d80f79ade5f7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e214b7f373961f7b5aa914b4aeaa02d45525059c --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/images/bee8e503247e6bfc518e9e846c8a4b872251751a5d9ae2c860f2d80f79ade5f7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1f8c52b28c9be3ef405555423e6b3eb7856fdc57f05a96a14c40a419d3002a7 +size 12205 diff --git a/data/2025/2504_12xxx/2504.12680/images/c2910b624ce561498b3ac5148b8d19cdc7e11a77167631d96ba8bc9677d3060b.jpg b/data/2025/2504_12xxx/2504.12680/images/c2910b624ce561498b3ac5148b8d19cdc7e11a77167631d96ba8bc9677d3060b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1208e846c5d1289361383545b48636b254429802 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/images/c2910b624ce561498b3ac5148b8d19cdc7e11a77167631d96ba8bc9677d3060b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:604291a2d1bbdc25e1ae1282d4047c871c7d7064294eedd8124a3be45417293d +size 4779 diff --git a/data/2025/2504_12xxx/2504.12680/images/c318cdb8f4090e0a3adc8de0a1fd78af3544fb0478b79abbbdcf108a3971a0d2.jpg b/data/2025/2504_12xxx/2504.12680/images/c318cdb8f4090e0a3adc8de0a1fd78af3544fb0478b79abbbdcf108a3971a0d2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..64977cc399a10fc75b7644e516645f54f52f7e1e --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/images/c318cdb8f4090e0a3adc8de0a1fd78af3544fb0478b79abbbdcf108a3971a0d2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:983caac5ba0f4c664aca1253153f2428d0072cd1bb212e4fcb5fee5f400b1295 +size 109454 diff --git a/data/2025/2504_12xxx/2504.12680/images/cc6240495caac68e3f50c7419676e1d867771a1e8a1581286196ee02a7f11458.jpg b/data/2025/2504_12xxx/2504.12680/images/cc6240495caac68e3f50c7419676e1d867771a1e8a1581286196ee02a7f11458.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f06c6a20bcb0654f65c0f83b992f57be5c43601 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/images/cc6240495caac68e3f50c7419676e1d867771a1e8a1581286196ee02a7f11458.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:addc5e1df01017c456f686267b480e792a1d22ef0e0010c6b7b69a84e744543f +size 3016 diff --git a/data/2025/2504_12xxx/2504.12680/images/ced17e7750b58424a1d929303c5985c60e050fc2f686bc41dfd7e71d7563fb33.jpg b/data/2025/2504_12xxx/2504.12680/images/ced17e7750b58424a1d929303c5985c60e050fc2f686bc41dfd7e71d7563fb33.jpg new file mode 100644 index 0000000000000000000000000000000000000000..df3899aa08af53ed986d33f14fa5d4bd1962446c --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/images/ced17e7750b58424a1d929303c5985c60e050fc2f686bc41dfd7e71d7563fb33.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80a9d59de1fdbe853b5e0b0df0fedd9962f75bad0b8eedd9c2ed7b75ccac8288 +size 3790 diff --git a/data/2025/2504_12xxx/2504.12680/images/d3863b9148a96e8d65b704fc34cde1fe3cea544d324697581654052ba2668f19.jpg b/data/2025/2504_12xxx/2504.12680/images/d3863b9148a96e8d65b704fc34cde1fe3cea544d324697581654052ba2668f19.jpg new file mode 100644 index 0000000000000000000000000000000000000000..481521063a0292c5aaffb726295d1e5ad0287750 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/images/d3863b9148a96e8d65b704fc34cde1fe3cea544d324697581654052ba2668f19.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:7fbc8fd3fbdba37a7315055cb0a0e6bcc30c32f301354c002e88a1e916cffb9f +size 16915 diff --git a/data/2025/2504_12xxx/2504.12680/images/df726cf50d053a9e9d1c8e05f446d5f41a73699eca043c6c69addb5422e88baa.jpg b/data/2025/2504_12xxx/2504.12680/images/df726cf50d053a9e9d1c8e05f446d5f41a73699eca043c6c69addb5422e88baa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5b9dadd185fb13f5b9c9de3502d79f145bc8ddf5 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/images/df726cf50d053a9e9d1c8e05f446d5f41a73699eca043c6c69addb5422e88baa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3bbe004d8ab03479c5342ea621c7e18b3332e19308e80128a1685d75e464d69 +size 3958 diff --git a/data/2025/2504_12xxx/2504.12680/images/e375db0edc369c6333853ce6daa2a93a25cdd6f89e88aa53d0a478663169f89c.jpg b/data/2025/2504_12xxx/2504.12680/images/e375db0edc369c6333853ce6daa2a93a25cdd6f89e88aa53d0a478663169f89c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8a3f71df21247961370a1bfa28d345f0f650f188 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/images/e375db0edc369c6333853ce6daa2a93a25cdd6f89e88aa53d0a478663169f89c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3bd9860e90d59c35cf9dfa43b486ee2c4cc9559b06469168f113bb8ee0b81acd +size 3425 diff --git a/data/2025/2504_12xxx/2504.12680/images/e7bf82e822e7498de48ef562eb3c67e6695560386cac202714327de0b66f57cf.jpg b/data/2025/2504_12xxx/2504.12680/images/e7bf82e822e7498de48ef562eb3c67e6695560386cac202714327de0b66f57cf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fd85c71bb86791d973af124a94e4dfbb603be2e0 --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/images/e7bf82e822e7498de48ef562eb3c67e6695560386cac202714327de0b66f57cf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:482e120ea2f836e20be5b708653391c5627b51b36e2cc040db1056a1a86aa413 +size 2822 diff --git a/data/2025/2504_12xxx/2504.12680/layout.json b/data/2025/2504_12xxx/2504.12680/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..2d90a8994485a219906e86b8e474eb0480f3ab9c --- /dev/null +++ b/data/2025/2504_12xxx/2504.12680/layout.json @@ -0,0 +1,12525 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 91, + 79, + 520, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 79, + 520, + 140 + ], + "spans": [ + { + "bbox": [ + 91, + 79, + 520, + 140 + ], + "type": "text", + "content": "Embodied-R: Collaborative Framework for Activating Embodied Spatial Reasoning in Foundation Models via Reinforcement Learning" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 115, + 149, + 497, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 149, + 497, + 178 + ], + "spans": [ + { + "bbox": [ + 115, + 149, + 497, + 178 + ], + "type": "text", + "content": "Baining Zhao*, Ziyou Wang*, Jianjie Fang*, Chen Gao†, Fanghang Man, Jinqiang Cui, Xin Wang, Xinlei Chen†, Yong Li, Wenwu Zhu" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 255, + 179, + 356, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 255, + 179, + 356, + 193 + ], + "spans": [ + { + "bbox": [ + 255, + 179, + 356, + 193 + ], + "type": "text", + "content": "Tsinghua University" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 205, + 193, + 217, + 205 + ], + "blocks": [ + { + "bbox": [ + 205, + 193, + 217, + 205 + ], + "lines": [ + { + "bbox": [ + 205, + 193, + 217, + 205 + ], + "spans": [ + 
{ + "bbox": [ + 205, + 193, + 217, + 205 + ], + "type": "image", + "image_path": "260ca3540d2ad6cd03f0d4ebb90864c70e7bb2a3f3d5777757b66e28572d71f1.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 220, + 194, + 278, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 194, + 278, + 206 + ], + "spans": [ + { + "bbox": [ + 220, + 194, + 278, + 206 + ], + "type": "text", + "content": "Project Page" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 367, + 192, + 380, + 204 + ], + "blocks": [ + { + "bbox": [ + 367, + 192, + 380, + 204 + ], + "lines": [ + { + "bbox": [ + 367, + 192, + 380, + 204 + ], + "spans": [ + { + "bbox": [ + 367, + 192, + 380, + 204 + ], + "type": "image", + "image_path": "7466a5b5cddbe1975919ac7f8fb16269bedbff7a95fb25f6839397ba74145802.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 381, + 194, + 406, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 381, + 194, + 406, + 205 + ], + "spans": [ + { + "bbox": [ + 381, + 194, + 406, + 205 + ], + "type": "text", + "content": "Code" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 168, + 224, + 190, + 233 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 168, + 224, + 190, + 233 + ], + "spans": [ + { + "bbox": [ + 168, + 224, + 190, + 233 + ], + "type": "text", + "content": "Tasks" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 67, + 238, + 184, + 300 + ], + "blocks": [ + { + "bbox": [ + 67, + 238, + 184, + 300 + ], + "lines": [ + { + "bbox": [ + 67, + 238, + 184, + 300 + ], + "spans": [ + { + "bbox": [ + 67, + 238, + 184, + 300 + ], + "type": "image", + "image_path": "b26be9858af5be7bd451a3520f190c77d197a842af79e537534c1d3cdadee387.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 186, + 239, + 236, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 239, + 236, + 246 + ], + "spans": [ + { + "bbox": [ + 186, + 239, + 236, + 246 + ], + "type": "text", + "content": "① Landmark Position" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 194, + 247, + 280, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 247, + 280, + 251 + ], + "spans": [ + { + "bbox": [ + 194, + 247, + 280, + 251 + ], + "type": "text", + "content": "what is your current position relative to [landmark] in" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 194, + 251, + 233, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 251, + 233, + 255 + ], + "spans": [ + { + "bbox": [ + 194, + 251, + 233, + 255 + ], + "type": "text", + "content": "[navigation instruction]" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 187, + 259, + 252, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 259, + 252, + 266 + ], + "spans": [ + { + "bbox": [ + 187, + 259, + 252, + 266 + ], + "type": "text", + "content": "② Counterfactual Reasoning" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 187, + 266, + 293, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 266, + 293, + 270 + ], + "spans": [ + { + "bbox": [ + 187, + 266, + 293, + 270 + ], + "type": "text", + "content": "Can you still reach destination if moving in another direction?" 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 187, + 274, + 239, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 274, + 239, + 280 + ], + "spans": [ + { + "bbox": [ + 187, + 274, + 239, + 280 + ], + "type": "text", + "content": "3Progress Evaluation" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 187, + 280, + 273, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 280, + 273, + 286 + ], + "spans": [ + { + "bbox": [ + 187, + 280, + 273, + 286 + ], + "type": "text", + "content": "Which step the navigation is currently perform in" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 187, + 293, + 236, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 293, + 236, + 300 + ], + "spans": [ + { + "bbox": [ + 187, + 293, + 236, + 300 + ], + "type": "text", + "content": "4 Action Generation" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 187, + 300, + 283, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 300, + 283, + 305 + ], + "spans": [ + { + "bbox": [ + 187, + 300, + 283, + 305 + ], + "type": "text", + "content": "What is your next action given [navigation instruction]?" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 187, + 313, + 233, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 313, + 233, + 319 + ], + "spans": [ + { + "bbox": [ + 187, + 313, + 233, + 319 + ], + "type": "inline_equation", + "content": "⑤" + }, + { + "bbox": [ + 187, + 313, + 233, + 319 + ], + "type": "text", + "content": " Relative Distance" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 194, + 319, + 260, + 325 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 319, + 260, + 325 + ], + "spans": [ + { + "bbox": [ + 194, + 319, + 260, + 325 + ], + "type": "text", + "content": "Which object is the closest to [object A]?" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 187, + 329, + 235, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 329, + 235, + 335 + ], + "spans": [ + { + "bbox": [ + 187, + 329, + 235, + 335 + ], + "type": "text", + "content": "6 Relative Direction" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 194, + 335, + 283, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 335, + 283, + 340 + ], + "spans": [ + { + "bbox": [ + 194, + 335, + 283, + 340 + ], + "type": "text", + "content": "If you are standing by [object A] and facing [object B], is" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 194, + 340, + 260, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 340, + 260, + 345 + ], + "spans": [ + { + "bbox": [ + 194, + 340, + 260, + 345 + ], + "type": "text", + "content": "the [object C] to your left, right, or back?" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 187, + 348, + 228, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 348, + 228, + 354 + ], + "spans": [ + { + "bbox": [ + 187, + 348, + 228, + 354 + ], + "type": "text", + "content": "7 Route Planning" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 194, + 354, + 244, + 359 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 354, + 244, + 359 + ], + "spans": [ + { + "bbox": [ + 194, + 354, + 244, + 359 + ], + "type": "text", + "content": "How to navigate to [object A]?" 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 187, + 363, + 236, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 363, + 236, + 369 + ], + "spans": [ + { + "bbox": [ + 187, + 363, + 236, + 369 + ], + "type": "text", + "content": "Appearance Order" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 187, + 369, + 286, + 374 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 187, + 369, + 286, + 374 + ], + "spans": [ + { + "bbox": [ + 187, + 369, + 286, + 374 + ], + "type": "text", + "content": "What will be the first-time appearance order of [object A," + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 194, + 375, + 252, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 375, + 252, + 380 + ], + "spans": [ + { + "bbox": [ + 194, + 375, + 252, + 380 + ], + "type": "text", + "content": "object B, object C] in your memory?" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 194, + 384, + 203, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 384, + 203, + 388 + ], + "spans": [ + { + "bbox": [ + 194, + 384, + 203, + 388 + ], + "type": "text", + "content": "··" + } + ] + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 292, + 270, + 348, + 337 + ], + "blocks": [ + { + "bbox": [ + 292, + 270, + 348, + 337 + ], + "lines": [ + { + "bbox": [ + 292, + 270, + 348, + 337 + ], + "spans": [ + { + "bbox": [ + 292, + 270, + 348, + 337 + ], + "type": "image", + "image_path": "8066252818aa0f756bf50e09469d3ee84df1cccc7b9c939aa9775510b37a56f7.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 296, + 348, + 345, + 357 + ], + "lines": [ + { + "bbox": [ + 296, + 348, + 345, + 357 + ], + "spans": [ + { + "bbox": [ + 296, + 348, + 345, + 357 + ], + "type": "text", + "content": "Embodied-R" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + } + ], + "index": 30 + }, + { + "bbox": [ + 427, + 224, + 470, + 235 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 427, + 224, + 470, + 235 + ], + "spans": [ + { + "bbox": [ + 427, + 224, + 470, + 235 + ], + "type": "text", + "content": "Reasoning" + } + ] + } + ], + "index": 32 + }, + { + "type": "image", + "bbox": [ + 356, + 236, + 542, + 254 + ], + "blocks": [ + { + "bbox": [ + 356, + 236, + 542, + 254 + ], + "lines": [ + { + "bbox": [ + 356, + 236, + 542, + 254 + ], + "spans": [ + { + "bbox": [ + 356, + 236, + 542, + 254 + ], + "type": "image", + "image_path": "a7afab221f6d6c3288ef9526fa3ff072372cb905f27e3f3fa7b35a2294a9b74a.jpg" + } + ] + } + ], + "index": 33, + "angle": 0, + "type": "image_body" + } + ], + "index": 33 + }, + { + "bbox": [ + 358, + 256, + 530, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 256, + 530, + 274 + ], + "spans": [ + { + "bbox": [ + 358, + 256, + 530, + 274 + ], + "type": "text", + "content": "[Example] Question: Navigation Instruction given at initial position: [Observe around, then fly towards the road, then turn left and land on the roof of the building on the left]. What are you doing now?" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 358, + 275, + 514, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 275, + 514, + 281 + ], + "spans": [ + { + "bbox": [ + 358, + 275, + 514, + 281 + ], + "type": "text", + "content": "Choose: A. I look around the square area. B. I fly towards the road." 
+ } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 378, + 282, + 511, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 378, + 282, + 511, + 288 + ], + "spans": [ + { + "bbox": [ + 378, + 282, + 511, + 288 + ], + "type": "text", + "content": "C. I turn left and land on the roof of the building on the left." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 378, + 289, + 477, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 378, + 289, + 477, + 294 + ], + "spans": [ + { + "bbox": [ + 378, + 289, + 477, + 294 + ], + "type": "text", + "content": "D. I fly over the park. E. I land." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 361, + 297, + 380, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 361, + 297, + 380, + 308 + ], + "spans": [ + { + "bbox": [ + 361, + 297, + 380, + 308 + ], + "type": "text", + "content": "E.I land" + } + ] + } + ], + "index": 38 + }, + { + "type": "image", + "bbox": [ + 385, + 297, + 397, + 310 + ], + "blocks": [ + { + "bbox": [ + 385, + 297, + 397, + 310 + ], + "lines": [ + { + "bbox": [ + 385, + 297, + 397, + 310 + ], + "spans": [ + { + "bbox": [ + 385, + 297, + 397, + 310 + ], + "type": "image", + "image_path": "8fe50a38bdcaff17c4598a1be54b92b276eda1fc2d6c11ae4b445b89417288e8.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + } + ], + "index": 39 + }, + { + "bbox": [ + 406, + 297, + 446, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 406, + 297, + 446, + 303 + ], + "spans": [ + { + "bbox": [ + 406, + 297, + 446, + 303 + ], + "type": "text", + "content": "Fast-Thinking" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 490, + 309, + 533, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 490, + 309, + 533, + 316 + ], + "spans": [ + { + "bbox": [ + 490, + 309, + 533, + 316 + ], + "type": "text", + "content": "Slow-Thinking" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 358, + 319, + 531, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 358, + 319, + 531, + 324 + ], + "spans": [ + { + "bbox": [ + 358, + 319, + 531, + 324 + ], + "type": "text", + "content": "Think: Let's analyze the event sequence and the instruction step by step:" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 357, + 325, + 493, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 357, + 325, + 493, + 331 + ], + "spans": [ + { + "bbox": [ + 357, + 325, + 493, + 331 + ], + "type": "text", + "content": "1. Initial Position: I start by observing around the square area." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 357, + 331, + 481, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 357, + 331, + 481, + 337 + ], + "spans": [ + { + "bbox": [ + 357, + 331, + 481, + 337 + ], + "type": "text", + "content": "2. Next Step: After observing, I fly towards the highway." + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 357, + 338, + 488, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 357, + 338, + 488, + 343 + ], + "spans": [ + { + "bbox": [ + 357, + 338, + 488, + 343 + ], + "type": "text", + "content": "3. Subsequent Step: Once at the highway, I should turn left." 
+ } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 357, + 344, + 517, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 357, + 344, + 517, + 350 + ], + "spans": [ + { + "bbox": [ + 357, + 344, + 517, + 350 + ], + "type": "text", + "content": "4. Final Step: Finally, I should land on the roof of the building on the left" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 357, + 350, + 408, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 357, + 350, + 408, + 357 + ], + "spans": [ + { + "bbox": [ + 357, + 350, + 408, + 357 + ], + "type": "text", + "content": "From the video frames:" + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 357, + 358, + 522, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 357, + 358, + 522, + 364 + ], + "spans": [ + { + "bbox": [ + 357, + 358, + 522, + 364 + ], + "type": "text", + "content": "- The initial frames show buildings and a square area, indicating the phase." + } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 357, + 365, + 541, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 357, + 365, + 541, + 370 + ], + "spans": [ + { + "bbox": [ + 357, + 365, + 541, + 370 + ], + "type": "text", + "content": "- The subsequent frames show a transition from the square area to a broader view." + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 357, + 372, + 527, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 357, + 372, + 527, + 376 + ], + "spans": [ + { + "bbox": [ + 357, + 372, + 527, + 376 + ], + "type": "text", + "content": "- The later frames depict a clear view of the road and surrounding buildings." + } + ] + } + ], + "index": 50 + }, + { + "type": "image", + "bbox": [ + 358, + 379, + 368, + 391 + ], + "blocks": [ + { + "bbox": [ + 358, + 379, + 368, + 391 + ], + "lines": [ + { + "bbox": [ + 358, + 379, + 368, + 391 + ], + "spans": [ + { + "bbox": [ + 358, + 379, + 368, + 391 + ], + "type": "image", + "image_path": "6b2092ae3ac9d8a38a7043afa27952f1f670fafcd1820ad106db5ceb62650bc1.jpg" + } + ] + } + ], + "index": 51, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 401, + 559, + 424 + ], + "lines": [ + { + "bbox": [ + 50, + 401, + 559, + 424 + ], + "spans": [ + { + "bbox": [ + 50, + 401, + 559, + 424 + ], + "type": "text", + "content": "Figure 1: Embodied spatial reasoning: tasks and thinking process. Challenging tasks from public embodied video datasets are identified, encompassing both indoor and outdoor scenarios. We introduce slow-thinking to improve reasoning performance." + } + ] + } + ], + "index": 54, + "angle": 0, + "type": "image_caption" + } + ], + "index": 51 + }, + { + "bbox": [ + 369, + 384, + 443, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 384, + 443, + 389 + ], + "spans": [ + { + "bbox": [ + 369, + 384, + 443, + 389 + ], + "type": "text", + "content": "Answer: B. I fly towards the road." 
+ } + ] + } + ], + "index": 52 + }, + { + "type": "image", + "bbox": [ + 450, + 380, + 461, + 391 + ], + "blocks": [ + { + "bbox": [ + 450, + 380, + 461, + 391 + ], + "lines": [ + { + "bbox": [ + 450, + 380, + 461, + 391 + ], + "spans": [ + { + "bbox": [ + 450, + 380, + 461, + 391 + ], + "type": "image", + "image_path": "0690f619416d8140759d8bfe85c98be41326383f16d97579a0edd83c69a8451f.jpg" + } + ] + } + ], + "index": 53, + "angle": 0, + "type": "image_body" + } + ], + "index": 53 + }, + { + "bbox": [ + 51, + 429, + 96, + 439 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 429, + 96, + 439 + ], + "spans": [ + { + "bbox": [ + 51, + 429, + 96, + 439 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 55 + }, + { + "bbox": [ + 50, + 442, + 297, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 442, + 297, + 641 + ], + "spans": [ + { + "bbox": [ + 50, + 442, + 297, + 641 + ], + "type": "text", + "content": "Humans can perceive and reason about spatial relationships from sequential visual observations, such as egocentric video streams. However, how pretrained models acquire such abilities, especially high-level reasoning, remains unclear. This paper introduces Embodied-R, a collaborative framework combining large-scale Vision-Language Models (VLMs) for perception and small-scale Language Models (LMs) for reasoning. Using Reinforcement Learning (RL) with a novel reward system considering think-answer logical consistency, the model achieves slow-thinking capabilities with limited computational resources. After training on only 5k embodied video samples, Embodied-R with a 3B LM matches state-of-the-art multimodal reasoning models (OpenAI-o1, Gemini-2.5-pro) on both in-distribution and out-of-distribution embodied spatial reasoning tasks. Embodied-R also exhibits emergent thinking patterns such as systematic analysis and contextual integration. We further explore research questions including response length, training on VLM, strategies for reward design, and differences in model generalization after SFT (Supervised Fine-Tuning) and RL training." + } + ] + } + ], + "index": 56 + }, + { + "bbox": [ + 51, + 662, + 134, + 673 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 662, + 134, + 673 + ], + "spans": [ + { + "bbox": [ + 51, + 662, + 134, + 673 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 57 + }, + { + "bbox": [ + 50, + 676, + 294, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 676, + 294, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 676, + 294, + 710 + ], + "type": "text", + "content": "On the path toward Artificial General Intelligence (AGI) [17], we hope that pre-trained foundation models can not only perform tasks such as dialogue and image understanding in the cyber world [2, 44]" + } + ] + } + ], + "index": 58 + }, + { + "bbox": [ + 313, + 429, + 560, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 429, + 560, + 517 + ], + "spans": [ + { + "bbox": [ + 313, + 429, + 560, + 517 + ], + "type": "text", + "content": "but also develop human-like embodied spatial cognition in the three-dimensional physical world, enabling them to perceive, think, and move [4, 32]. The fundamental way humans achieve spatial cognition is through continuous, dynamic visual observations, akin to video streams [26, 30]. 
For example, by observing their surroundings, humans can infer their position relative to nearby objects. Similarly, based on historical visual observations, humans can determine the actions they should take to reach a target destination." + } + ] + } + ], + "index": 59 + }, + { + "bbox": [ + 313, + 517, + 559, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 517, + 559, + 681 + ], + "spans": [ + { + "bbox": [ + 313, + 517, + 559, + 681 + ], + "type": "text", + "content": "Visual spatial cognition can be divided into two levels: perception and reasoning [51]. Perception refers to \"what is seen\", characterized by direct, low-level tasks such as object recognition, edge detection, or color differentiation [52]. Reasoning, on the other hand, involves \"what is understood\" and \"what actions to take\", which are indirect and higher-level tasks requiring logical inference and knowledge integration [62]. Examples of reasoning include \"Where did I come from?\" (e.g., recalling historical movement trajectories [36]), \"Where am I?\" (e.g., inferring the spatial relationships between nearby objects and distances [5]), and \"Where do I want to go?\" (e.g., planning actions and deciding movements to reach a destination [8]). While most existing research focuses on improving the perception capabilities of foundation models [6, 11], with notable progress, their spatial reasoning abilities remain limited [9, 58], and methods for enhancement are largely unexplored." + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 314, + 681, + 559, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 681, + 559, + 703 + ], + "spans": [ + { + "bbox": [ + 314, + 681, + 559, + 703 + ], + "type": "text", + "content": "Specifically, video-based spatial reasoning poses several challenges, as follows:" + } + ] + } + ], + "index": 61 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.12680v1 [cs.AI] 17 Apr 2025" + } + ] + } + ], + "index": 0 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 84, + 295, + 435 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 51, + 84, + 294, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 84, + 294, + 139 + ], + "spans": [ + { + "bbox": [ + 51, + 84, + 294, + 139 + ], + "type": "text", + "content": "- Reasoning is always built upon perception [19, 32]. For the studied problem, continuous visual observations impose higher demands on perception. Reasoning cannot be well achieved with faulty perceptions or hallucinations [53]. It is challenging to reason when it is already hard to perceive from the videos." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 51, + 140, + 295, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 140, + 295, + 249 + ], + "spans": [ + { + "bbox": [ + 51, + 140, + 295, + 249 + ], + "type": "text", + "content": "- Video data naturally involves complex spatio-temporal relationships, requiring the discovery of object associations across frames and the extraction of semantics relevant to the reasoning task [16]. 
For instance, to navigate to a destination outside the current field of view, one must infer their location from historical visual observations, build a mental map of the environment, develop a high-level plan to determine the direction, and finally decide on specific actions to execute. Existing supervised fine-tuning (SFT) training methods lack supervision for the reasoning process, making it difficult to handle such reasoning tasks [62]." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 51, + 250, + 294, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 250, + 294, + 435 + ], + "spans": [ + { + "bbox": [ + 51, + 250, + 294, + 435 + ], + "type": "text", + "content": "- Embodied visual observations have distinct characteristics. First, understanding disembodied videos, such as movies or TV shows, primarily emphasizes the content within the video, often from a broad and objective perspective [27]. In contrast, egocentric videos focus on understanding the relationship between the observer and the surrounding environment, often from a constrained first-person perspective [22]. Second, embodied continuous visual observations are generated over time, indicating that embodied perception should rely on sequential inputs rather than aggregating all visual observations for a single input after a prolonged period [31]. Finally, due to the continuity of motion in the physical world, egocentric visual observations also exhibit spatial continuity, meaning there is significant redundancy and repetition between frames. Consequently, directly applying existing multimodal large language models (MLLMs) to embodied videos leads to issues, including loss of generalization and input token limits caused by excessive redundant frames [1, 29]." + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 50, + 446, + 295, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 446, + 295, + 544 + ], + "spans": [ + { + "bbox": [ + 50, + 446, + 295, + 544 + ], + "type": "text", + "content": "Recently, the impressive performance of OpenAI's o1/o3 [38] and DeepSeek-R1 [24] in solving complex reasoning problems(e.g., mathematics, coding, science, etc.) has drawn attention to reinforcement learning (RL) techniques. By incorporating the chain-of-thought (CoT) reasoning process into post-training, large language models (LLMs) demonstrate a \"slow-thinking\" mode, where they reason thoroughly before generating responses [45, 55]. Inspired by this, we attempt to introduce \"slow thinking\" into embodied video-based spatial reasoning tasks, as shown in Figure 1." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 545, + 295, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 545, + 295, + 665 + ], + "spans": [ + { + "bbox": [ + 50, + 545, + 295, + 665 + ], + "type": "text", + "content": "This brings a new challenge: the trade-off between model size and computational cost. Existing studies suggest a strong correlation between multimodal understanding/perception capabilities and model size [7, 20, 56]. Since reasoning builds on perception, larger vision-language foundation models should be used as the starting point for training. However, increasing model size leads to often unacceptable computational costs. Additionally, video inputs map to long token sequences, further raising computational demands. 
Is there a way to leverage the perception capabilities of large-scale models while developing embodied reasoning abilities at a lower computational cost?" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 666, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 666, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 666, + 295, + 710 + ], + "type": "text", + "content": "Inspired by neuroscience [64], spatial perception and reasoning involve distinct brain regions: visual perception occurs in the visual areas of the occipital lobe [13], basic spatial understanding in the parietal lobe [18], and complex spatial reasoning in the prefrontal" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 84, + 559, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 84, + 559, + 281 + ], + "spans": [ + { + "bbox": [ + 313, + 84, + 559, + 281 + ], + "type": "text", + "content": "cortex [14]. This inspired the design of a collaborative framework with two main components: a large-scale vision-language model (VLM) for perception and a small-scale language model (LM) for reasoning. Based on the continuity of observations, we first propose a key-frame extractor to retain critical information while reducing computational costs. Using a VLM, we sequentially extract semantic information from the frames, which simulates real-world online reasoning while effectively managing the input token length of VLMs for long video inputs. Finally, the semantic information and reasoning question are fed into the small-scale language model, which outputs the reasoning process and final answers. The small-scale language model is trained with RL, where the reward modeling not only incorporates rule-based rewards inspired by Deepseek-R1-Zero [24] but, more importantly, introduces a novel reward for the logical consistency of the reasoning process. In the experiments, we explore seven research questions, covering the framework's performance, RL's role in activating embodied spatial reasoning, and out-of-distribution generalization capabilities." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 324, + 282, + 556, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 282, + 556, + 293 + ], + "spans": [ + { + "bbox": [ + 324, + 282, + 556, + 293 + ], + "type": "text", + "content": "In general, the main contributions of this paper are as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 315, + 304, + 559, + 490 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 315, + 304, + 558, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 304, + 558, + 369 + ], + "spans": [ + { + "bbox": [ + 315, + 304, + 558, + 369 + ], + "type": "text", + "content": "- We propose a collaborative framework for large-scale and small-scale foundation models to address spatial reasoning in the video modality. By decoupling perception and reasoning, the framework leverages the perceptual strength of large-scale foundation models while efficiently enhancing the reasoning capabilities of smaller models in a computationally resource-friendly manner." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 315, + 370, + 558, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 370, + 558, + 423 + ], + "spans": [ + { + "bbox": [ + 315, + 370, + 558, + 423 + ], + "type": "text", + "content": "- This is the first work to employ reinforcement learning (RL) to enhance the embodied spatial reasoning abilities of foundation models. Specifically, we introduce a novel logical consistency reward, which improves the alignment between reasoning processes and generated answers." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 315, + 424, + 559, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 424, + 559, + 490 + ], + "spans": [ + { + "bbox": [ + 315, + 424, + 559, + 490 + ], + "type": "text", + "content": "- Our proposed Embodied-R achieves performance comparable to state-of-the-art multimodal large language models (e.g., OpenAI-o1/Gemini-2.5-Pro) on both in-distribution and out-of-distribution benchmarks. We further investigate research questions including the generalization comparison between models trained by SFT & RL, reward design strategies, etc." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 509, + 403, + 520 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 509, + 403, + 520 + ], + "spans": [ + { + "bbox": [ + 315, + 509, + 403, + 520 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 523, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 523, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 523, + 559, + 710 + ], + "type": "text", + "content": "Large Language Model Reasoning. Recently, enhancing reasoning capabilities has become a key focus in large model technologies, demonstrating remarkable performance on tasks such as mathematical and logical problem-solving [25, 47, 57]. Following the release of OpenAI's o1 [38], numerous studies have proposed various technical approaches to achieve similar functionalities, including Chain-of-Thought (CoT) [54], Monte Carlo Tree Search (MCTS) [23, 60], distillation [35], rejection sampling combined with supervised fine-tuning (SFT) or Direct Preference Optimization (DPO) [40], among others. Furthermore, Deepseek-r1 [24] introduced a method to foster the emergence of reasoning abilities in large language models (LLMs) through rule-based rewards combined with reinforcement learning. Similarly, Kimi k1.5 [45] proposed a comparable approach, presenting various training techniques, such as curriculum learning. This reinforcement learning paradigm has sparked significant interest, with subsequent works successfully reproducing related results [55, 59]." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 84, + 294, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 84, + 294, + 237 + ], + "spans": [ + { + "bbox": [ + 50, + 84, + 294, + 237 + ], + "type": "text", + "content": "Embodied Spatial Reasoning with VLMs. Inspired by the generality of foundation models across various domains [2, 3], embodied intelligence aims to develop agents that utilize large multimodal models as their \"brains\" to achieve perception, navigation, and manipulation in the 3D physical world [15, 41].
In terms of input, human visual-spatial perception is more akin to continuous RGB observations, similar to video streams [12, 42], rather than static images [48] or point clouds [52]. Several embodied video benchmarks [58] demonstrate that, while perception tasks are relatively well-addressed, spatial reasoning tasks—such as spatial relationship inference, navigation, and planning—remain highly challenging. However, existing research [16, 43] on video reasoning primarily focuses on disembodied content reasoning, with little emphasis on scenarios involving embodied continuous visual inputs." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 238, + 294, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 238, + 294, + 370 + ], + "spans": [ + { + "bbox": [ + 50, + 238, + 294, + 370 + ], + "type": "text", + "content": "Collaboration between large and small models. Existing research primarily focuses on addressing the resource consumption and privacy risks associated with large models, as well as the efficiency and performance advantages of small models in specific scenarios [50]. Small models can assist large models in data selection, prompt optimization, and reasoning enhancement [28, 61]. The use of small models to detect hallucinations and privacy leakage is explored in [49, 63], improving overall system reliability. While our work shares the goal of reducing computational resource demands, it differs by emphasizing the complementary roles of large-scale VLMs in perception and small-scale LMs in enhancing embodied spatial reasoning." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 51, + 380, + 195, + 392 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 380, + 195, + 392 + ], + "spans": [ + { + "bbox": [ + 51, + 380, + 195, + 392 + ], + "type": "text", + "content": "3 The Embodied-R Method" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 395, + 295, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 395, + 295, + 440 + ], + "spans": [ + { + "bbox": [ + 50, + 395, + 295, + 440 + ], + "type": "text", + "content": "We first define the problem of embodied spatial reasoning. Subsequently, we introduce the VLM-based perception module and the LM-based reasoning module. The collaborative framework is shown in Figure 2." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 449, + 186, + 460 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 449, + 186, + 460 + ], + "spans": [ + { + "bbox": [ + 51, + 449, + 186, + 460 + ], + "type": "text", + "content": "3.1 Problem Formulation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 464, + 294, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 464, + 294, + 541 + ], + "spans": [ + { + "bbox": [ + 50, + 464, + 294, + 541 + ], + "type": "text", + "content": "In the physical world, an agent moves through space, generating a sequence of video frames (continuous visual observations) " + }, + { + "bbox": [ + 50, + 464, + 294, + 541 + ], + "type": "inline_equation", + "content": "\\mathbf{f} = [f_0, f_1, \\dots, f_T]" + }, + { + "bbox": [ + 50, + 464, + 294, + 541 + ], + "type": "text", + "content": ". Suppose a spatial reasoning problem is denoted as " + }, + { + "bbox": [ + 50, + 464, + 294, + 541 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 50, + 464, + 294, + 541 + ], + "type": "text", + "content": ". 
Our goal is to build a model that takes " + }, + { + "bbox": [ + 50, + 464, + 294, + 541 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 50, + 464, + 294, + 541 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 464, + 294, + 541 + ], + "type": "inline_equation", + "content": "\\mathbf{f}" + }, + { + "bbox": [ + 50, + 464, + 294, + 541 + ], + "type": "text", + "content": " as inputs and outputs an answer " + }, + { + "bbox": [ + 50, + 464, + 294, + 541 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 50, + 464, + 294, + 541 + ], + "type": "text", + "content": ". The answer " + }, + { + "bbox": [ + 50, + 464, + 294, + 541 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 50, + 464, + 294, + 541 + ], + "type": "text", + "content": " is considered correct if it is semantically consistent with the ground truth " + }, + { + "bbox": [ + 50, + 464, + 294, + 541 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 50, + 464, + 294, + 541 + ], + "type": "text", + "content": "; otherwise, it is deemed incorrect." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 552, + 249, + 565 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 552, + 249, + 565 + ], + "spans": [ + { + "bbox": [ + 51, + 552, + 249, + 565 + ], + "type": "text", + "content": "3.2 Large-Scale VLM-based Perception" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 567, + 294, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 567, + 294, + 676 + ], + "spans": [ + { + "bbox": [ + 50, + 567, + 294, + 676 + ], + "type": "text", + "content": "3.2.1 Key-Frame Extractor. As the agent moves continuously in space, high sampling frequencies result in significant overlap between consecutive frames. On one hand, the VLM relies on changes in the static objects within the environment across frames to infer the agent's pose variation. On the other hand, excessive overlap between frames leads to increased inference costs for both the VLM and LLM. To address this, we designed a key-frame extractor tailored to the characteristics of embodied videos, selecting key frames that retain overlap while ensuring sufficient information gain between them." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 677, + 294, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 677, + 294, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 677, + 294, + 710 + ], + "type": "text", + "content": "The extraction of key-frames is based on the overlap of visual fields caused by motion continuity. When the agent moves forward, the visual content in the latter frame is expected to overlap with a" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 84, + 559, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 84, + 559, + 162 + ], + "spans": [ + { + "bbox": [ + 313, + 84, + 559, + 162 + ], + "type": "text", + "content": "portion of the former frame, and the reverse is true when moving backward. Similarly, during left or right rotations, the latter frame should partially overlap with the former frame in the horizontal direction, and during upward or downward rotations, the overlap occurs in the vertical direction. Given that the sampling frequency of visual observations is typically much higher than the agent's motion speed, frames generally exhibit significant overlap." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 162, + 559, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 162, + 559, + 380 + ], + "spans": [ + { + "bbox": [ + 313, + 162, + 559, + 380 + ], + "type": "text", + "content": "Specifically, a perspective transformation is used to model the geometric relationship between frames. Assuming " + }, + { + "bbox": [ + 313, + 162, + 559, + 380 + ], + "type": "inline_equation", + "content": "f_{t}" + }, + { + "bbox": [ + 313, + 162, + 559, + 380 + ], + "type": "text", + "content": " is a key-frame, to determine whether " + }, + { + "bbox": [ + 313, + 162, + 559, + 380 + ], + "type": "inline_equation", + "content": "f_{t+1}" + }, + { + "bbox": [ + 313, + 162, + 559, + 380 + ], + "type": "text", + "content": " should also be considered a keyframe, keypoints and descriptors are calculated from " + }, + { + "bbox": [ + 313, + 162, + 559, + 380 + ], + "type": "inline_equation", + "content": "f_{t}" + }, + { + "bbox": [ + 313, + 162, + 559, + 380 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 162, + 559, + 380 + ], + "type": "inline_equation", + "content": "f_{t+1}" + }, + { + "bbox": [ + 313, + 162, + 559, + 380 + ], + "type": "text", + "content": " using the Oriented FAST and Rotated BRIEF (ORB) algorithm. Next, a feature matching algorithm, such as the Brute-Force Matcher, is applied to match the descriptors between the two frames, and the Random Sample Consensus (RANSAC) algorithm is employed to estimate the homography matrix. The overlap ratio between the two frames is then computed. If the overlap ratio is less than a predefined threshold, it indicates significant visual changes between the frames, and " + }, + { + "bbox": [ + 313, + 162, + 559, + 380 + ], + "type": "inline_equation", + "content": "f_{t+1}" + }, + { + "bbox": [ + 313, + 162, + 559, + 380 + ], + "type": "text", + "content": " is marked as a key-frame. Otherwise, the algorithm proceeds to calculate the overlap ratio between " + }, + { + "bbox": [ + 313, + 162, + 559, + 380 + ], + "type": "inline_equation", + "content": "f_{t}" + }, + { + "bbox": [ + 313, + 162, + 559, + 380 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 162, + 559, + 380 + ], + "type": "inline_equation", + "content": "f_{t+2}" + }, + { + "bbox": [ + 313, + 162, + 559, + 380 + ], + "type": "text", + "content": ". This process continues until a new key-frame is identified, which then becomes the reference for subsequent frames. Considering the effect of viewpoint changes, rotations (both horizontal and vertical) result in larger field-of-view variations, leading to more frames being recorded during these movements. If the extracted keyframes (with indices " + }, + { + "bbox": [ + 313, + 162, + 559, + 380 + ], + "type": "inline_equation", + "content": "k_0, k_1, \\dots, k_n" + }, + { + "bbox": [ + 313, + 162, + 559, + 380 + ], + "type": "text", + "content": ") are denoted as " + }, + { + "bbox": [ + 313, + 162, + 559, + 380 + ], + "type": "inline_equation", + "content": "\\mathbf{f}' = [f_{k_0}, f_{k_1}, \\dots, f_{k_n}]" + }, + { + "bbox": [ + 313, + 162, + 559, + 380 + ], + "type": "text", + "content": ", the keyframe extraction process can be summarized as:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 398, + 387, + 559, + 399 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 398, + 387, + 559, + 399 + ], + "spans": [ + { + "bbox": [ + 398, + 387, + 559, + 399 + ], + "type": "interline_equation", + "content": "\\mathbf{f}' = \\mathrm{K\\text{-}Extractor}(\\mathbf{f}). 
\\tag {1}", + "image_path": "cc6240495caac68e3f50c7419676e1d867771a1e8a1581286196ee02a7f11458.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 406, + 559, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 406, + 559, + 548 + ], + "spans": [ + { + "bbox": [ + 313, + 406, + 559, + 548 + ], + "type": "text", + "content": "3.2.2 Embodied Semantic Representation. Since perceptual capability is positively correlated with model size [27, 58, 62], we employ a large-scale VLM to process visual inputs to ensure high-quality perception. The differential information of each key frame is described sequentially. This approach provides two key benefits: 1) The sequential and dynamic processing aligns better with the characteristics of embodied scenarios, where visual observations are continuously generated over time. At each moment, the model should integrate historical semantic representations with the latest visual observations, rapidly updating the semantic understanding of spatial perception. 2) It facilitates the handling of long videos by avoiding the input token limitations that arise when all frames are processed simultaneously by the VLM." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 548, + 559, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 548, + 559, + 594 + ], + "spans": [ + { + "bbox": [ + 313, + 548, + 559, + 594 + ], + "type": "text", + "content": "Specifically, for the first frame, the VLM identifies the objects present in the scene, their attributes, and their spatial locations. For subsequent frames, both the previous frame and the current frame are input into the VLM to extract key semantic representation " + }, + { + "bbox": [ + 313, + 548, + 559, + 594 + ], + "type": "inline_equation", + "content": "s_{k_j}" + }, + { + "bbox": [ + 313, + 548, + 559, + 594 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 367, + 599, + 559, + 613 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 599, + 559, + 613 + ], + "spans": [ + { + "bbox": [ + 367, + 599, + 559, + 613 + ], + "type": "interline_equation", + "content": "s _ {k _ {j}} \\sim \\psi_ {\\theta} (s | f _ {k _ {j - 1}}, f _ {k _ {j}}; q), j = 1, 2, \\dots , n, \\tag {2}", + "image_path": "626c513f3b37a8921a333604e0f80d44cc7cf961026f0bb1bb4343b8b9125637.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 618, + 440, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 618, + 440, + 630 + ], + "spans": [ + { + "bbox": [ + 314, + 618, + 440, + 630 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 314, + 618, + 440, + 630 + ], + "type": "inline_equation", + "content": "s_{k_j}" + }, + { + "bbox": [ + 314, + 618, + 440, + 630 + ], + "type": "text", + "content": " consists of three items:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 315, + 632, + 559, + 709 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 315, + 632, + 558, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 632, + 558, + 654 + ], + "spans": [ + { + "bbox": [ + 315, + 632, + 558, + 654 + ], + "type": "text", + "content": "- Action: Inferring the agent's actions based on the changes in visual observations between consecutive frames." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 654, + 559, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 654, + 559, + 687 + ], + "spans": [ + { + "bbox": [ + 315, + 654, + 559, + 687 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 315, + 654, + 559, + 687 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 315, + 654, + 559, + 687 + ], + "type": "text", + "content": " Information: Determining changes in the spatial relationships between the agent and known objects, as well as identifying whether new objects appear in the field of view." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 315, + 688, + 558, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 688, + 558, + 709 + ], + "spans": [ + { + "bbox": [ + 315, + 688, + 558, + 709 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 315, + 688, + 558, + 709 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 315, + 688, + 558, + 709 + ], + "type": "text", + "content": "-related content: Detecting whether objects or information relevant to the reasoning task appear in the latest field of view." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 82, + 561, + 355 + ], + "blocks": [ + { + "bbox": [ + 52, + 82, + 561, + 355 + ], + "lines": [ + { + "bbox": [ + 52, + 82, + 561, + 355 + ], + "spans": [ + { + "bbox": [ + 52, + 82, + 561, + 355 + ], + "type": "image", + "image_path": "b4adaf2d4183da6749bf4525fe2d5c946fa39eb4214e564e23bd8a915e471952.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 365, + 560, + 411 + ], + "lines": [ + { + "bbox": [ + 50, + 365, + 560, + 411 + ], + "spans": [ + { + "bbox": [ + 50, + 365, + 560, + 411 + ], + "type": "text", + "content": "Figure 2: The proposed Embodied-R is a collaborative embodied spatial reasoning framework integrating a Vision-Language Model (VLM) and a Language Model (LM). The separation of perception and reasoning enables us to leverage the perceptual capabilities of large-scale VLMs while training a resource-efficient small-scale LM to activate embodied reasoning through RL. Notably, we introduce a novel logical consistency reward to guide the LM in producing logically coherent reasoning and answer." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 51, + 426, + 295, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 426, + 295, + 450 + ], + "spans": [ + { + "bbox": [ + 51, + 426, + 295, + 450 + ], + "type": "text", + "content": "In this way, we can extract spatial semantic representations " + }, + { + "bbox": [ + 51, + 426, + 295, + 450 + ], + "type": "inline_equation", + "content": "\\mathbf{s} = [s_{k_0}, s_{k_1}, \\dots, s_{k_n}]" + }, + { + "bbox": [ + 51, + 426, + 295, + 450 + ], + "type": "text", + "content": " from the keyframe " + }, + { + "bbox": [ + 51, + 426, + 295, + 450 + ], + "type": "inline_equation", + "content": "\\mathbf{f}'" + }, + { + "bbox": [ + 51, + 426, + 295, + 450 + ], + "type": "text", + "content": "." 
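[Editor's illustration] The key-frame extraction of Sec. 3.2.1 (Eq. 1) lends itself to a short sketch. The following is a minimal, non-authoritative Python implementation assuming OpenCV (`cv2`) and NumPy; the overlap threshold `tau` and `min_matches` are hypothetical placeholders, not values reported by the paper. The extracted keyframes would then feed the sequential VLM captioning of Eq. (2).

```python
import cv2
import numpy as np

def overlap_ratio(f_prev, f_curr, min_matches=10):
    """Estimate field-of-view overlap between two frames via ORB + RANSAC homography."""
    orb = cv2.ORB_create()
    g1 = cv2.cvtColor(f_prev, cv2.COLOR_BGR2GRAY)
    g2 = cv2.cvtColor(f_curr, cv2.COLOR_BGR2GRAY)
    kp1, des1 = orb.detectAndCompute(g1, None)
    kp2, des2 = orb.detectAndCompute(g2, None)
    if des1 is None or des2 is None:
        return 0.0
    # Brute-Force matching on binary ORB descriptors
    matches = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True).match(des1, des2)
    if len(matches) < min_matches:
        return 0.0  # too few correspondences: treat views as barely overlapping
    src = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    dst = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
    H, _ = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
    if H is None:
        return 0.0
    h, w = g1.shape[:2]
    corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]]).reshape(-1, 1, 2)
    # Project the previous frame's view into the current frame and intersect
    warped = cv2.perspectiveTransform(corners, H)
    area, _ = cv2.intersectConvexConvex(warped.reshape(-1, 2).astype(np.float32),
                                        corners.reshape(-1, 2))
    return area / (w * h)

def k_extractor(frames, tau=0.6):
    """Eq. (1): keep a frame as a new key-frame once overlap with the last key-frame drops below tau."""
    keyframes = [frames[0]]
    for f in frames[1:]:
        if overlap_ratio(keyframes[-1], f) < tau:
            keyframes.append(f)
    return keyframes
```

As in the paper's description, rotations shrink the overlap faster than forward motion, so this rule naturally records more keyframes during viewpoint changes.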
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 469, + 241, + 482 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 469, + 241, + 482 + ], + "spans": [ + { + "bbox": [ + 51, + 469, + 241, + 482 + ], + "type": "text", + "content": "3.3 Small-Scale LM-based Reasoning" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 483, + 295, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 483, + 295, + 528 + ], + "spans": [ + { + "bbox": [ + 50, + 483, + 295, + 528 + ], + "type": "text", + "content": "Given the semantic perception results, we can train a training-friendly, small-scale language model capable of performing embodied spatial reasoning. Assuming the small-scale LM is denoted as " + }, + { + "bbox": [ + 50, + 483, + 295, + 528 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 50, + 483, + 295, + 528 + ], + "type": "text", + "content": ", the response " + }, + { + "bbox": [ + 50, + 483, + 295, + 528 + ], + "type": "inline_equation", + "content": "o" + }, + { + "bbox": [ + 50, + 483, + 295, + 528 + ], + "type": "text", + "content": " inferred from the model can be expressed as: " + }, + { + "bbox": [ + 50, + 483, + 295, + 528 + ], + "type": "inline_equation", + "content": "o \\sim \\pi_{\\theta}(o \\mid q, s)" + }, + { + "bbox": [ + 50, + 483, + 295, + 528 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 528, + 295, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 528, + 295, + 626 + ], + "spans": [ + { + "bbox": [ + 50, + 528, + 295, + 626 + ], + "type": "text", + "content": "Our training objective is to ensure that the model adheres to the \"think-then-answer\" paradigm, where the thinking process is logical, and the answer is correct. We follow DeepSeek-R1-Zero and adopt a computationally efficient RL training strategy, Group Relative Policy Optimization (GRPO). Besides rule-based format and accuracy rewards, we propose a novel reasoning process reward tailored for embodied reasoning tasks to mitigate reward hacking and enhance the logical consistency between the reasoning process and the final answer." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "text", + "content": "3.3.1 Group Relative Policy Optimization. For a given query " + }, + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "text", + "content": " and semantic annotation " + }, + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "text", + "content": ", GRPO generates a group of outputs " + }, + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "inline_equation", + "content": "\\{o_1, o_2, \\dots, o_G\\}" + }, + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "text", + "content": " using the old policy " + }, + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "inline_equation", + "content": "\\pi_{\\mathrm{old}}" + }, + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "text", + "content": ". The reference policy " + }, + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "inline_equation", + "content": "\\pi_{\\mathrm{ref}}" + }, + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "text", + "content": " refers to the original model not trained via GRPO. 
The policy model " + }, + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "inline_equation", + "content": "\\pi_\\theta" + }, + { + "bbox": [ + 50, + 643, + 295, + 710 + ], + "type": "text", + "content": " is then updated by optimizing the following objective:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 324, + 453, + 559, + 508 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 453, + 559, + 508 + ], + "spans": [ + { + "bbox": [ + 324, + 453, + 559, + 508 + ], + "type": "interline_equation", + "content": "\\mathcal{J}(\\theta) = \\mathbb{E}_{(q, \\mathbf{s}) \\sim \\mathbb{D},\\, \\{o_i\\}_{i=1}^{G} \\sim \\pi_{\\mathrm{old}}(o|q,\\mathbf{s})} \\Bigg[ \\frac{1}{G} \\sum_{i=1}^{G} \\Big( \\min \\Big( \\frac{\\pi_{\\theta}(o_i|q,\\mathbf{s})}{\\pi_{\\mathrm{old}}(o_i|q,\\mathbf{s})} A_i, \\operatorname{clip}\\Big( \\frac{\\pi_{\\theta}(o_i|q,\\mathbf{s})}{\\pi_{\\mathrm{old}}(o_i|q,\\mathbf{s})}, 1-\\epsilon, 1+\\epsilon \\Big) A_i \\Big) - \\beta \\mathcal{D}_{\\mathrm{KL}}(\\pi_{\\theta} \\| \\pi_{\\mathrm{ref}}) \\Big) \\Bigg], \\tag{3}", + "image_path": "65dc4ebdfd7213c98c84a9db3c98bd4a53e477d314b772599b0f29c8fc018ae2.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 524, + 560, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 524, + 560, + 578 + ], + "spans": [ + { + "bbox": [ + 314, + 524, + 560, + 578 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 314, + 524, + 560, + 578 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 314, + 524, + 560, + 578 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 314, + 524, + 560, + 578 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 314, + 524, + 560, + 578 + ], + "type": "text", + "content": " are hyperparameters, and " + }, + { + "bbox": [ + 314, + 524, + 560, + 578 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{KL}}(\\pi_{\\theta}||\\pi_{\\mathrm{ref}})" + }, + { + "bbox": [ + 314, + 524, + 560, + 578 + ], + "type": "text", + "content": " is the KL divergence penalty: " + }, + { + "bbox": [ + 314, + 524, + 560, + 578 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\mathrm{KL}}(\\pi_{\\theta}||\\pi_{\\mathrm{ref}}) = \\frac{\\pi_{\\mathrm{ref}}(o_i|q,\\mathbf{s})}{\\pi_{\\theta}(o_i|q,\\mathbf{s})} - \\log \\frac{\\pi_{\\mathrm{ref}}(o_i|q,\\mathbf{s})}{\\pi_{\\theta}(o_i|q,\\mathbf{s})} - 1. 
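[Editor's illustration] A compact, PyTorch-style sketch of Eq. (3) may help; it is not the authors' implementation. It assumes summed per-response log-probabilities under the three policies and uses the group-normalized advantage A_i defined immediately below; hyperparameter values are illustrative.

```python
import torch

def grpo_loss(logp_new, logp_old, logp_ref, rewards, eps=0.2, beta=0.001):
    """Sketch of the GRPO objective (Eq. 3) for one group of G responses.

    logp_new/logp_old/logp_ref: log-probs of each response o_i under
    pi_theta, pi_old, pi_ref (shape [G]); rewards: r_i for the group.
    """
    # Group-relative advantage: A_i = (r_i - mean) / std
    adv = (rewards - rewards.mean()) / (rewards.std() + 1e-8)

    # Clipped importance-sampling surrogate, as in PPO
    ratio = torch.exp(logp_new - logp_old)
    surrogate = torch.minimum(ratio * adv,
                              torch.clamp(ratio, 1 - eps, 1 + eps) * adv)

    # Unbiased estimator of KL(pi_theta || pi_ref): exp(d) - d - 1, d = logp_ref - logp_new
    d = logp_ref - logp_new
    kl = torch.exp(d) - d - 1

    # Maximizing J(theta) is equivalent to minimizing its negation
    return -(surrogate - beta * kl).mean()
```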
+ }, + { + "bbox": [ + 314, + 524, + 560, + 578 + ], + "type": "inline_equation", + "content": "A_{i}" + }, + { + "bbox": [ + 314, + 524, + 560, + 578 + ], + "type": "text", + "content": " represents the advantage corresponding to the output " + }, + { + "bbox": [ + 314, + 524, + 560, + 578 + ], + "type": "inline_equation", + "content": "o_i" + }, + { + "bbox": [ + 314, + 524, + 560, + 578 + ], + "type": "text", + "content": ", calculated from the corresponding rewards " + }, + { + "bbox": [ + 314, + 524, + 560, + 578 + ], + "type": "inline_equation", + "content": "\\{r_1, r_2, \\dots, r_G\\}\\colon A_i = \\frac{r_i - \\mathrm{mean}(\\{r_1, r_2, \\dots, r_G\\})}{\\mathrm{std}(\\{r_1, r_2, \\dots, r_G\\})}" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 314, + 589, + 560, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 589, + 560, + 665 + ], + "spans": [ + { + "bbox": [ + 314, + 589, + 560, + 665 + ], + "type": "text", + "content": "3.3.2 Reward Modeling. Reward modeling is a critical component of RL algorithms, as its design guides the direction of model optimization. We propose three types of rewards: format reward, accuracy reward, and logical consistency reward. These are designed to respectively guide the model to learn the \"think-answer\" reasoning pattern, accurate embodied spatial reasoning, and logical consistency between reasoning and the answer." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 314, + 666, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 666, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 314, + 666, + 559, + 710 + ], + "type": "text", + "content": "Format Reward: We aim for the model to output " + }, + { + "bbox": [ + 314, + 666, + 559, + 710 + ], + "type": "inline_equation", + "content": "o_i" + }, + { + "bbox": [ + 314, + 666, + 559, + 710 + ], + "type": "text", + "content": " by first producing an embodied reasoning process " + }, + { + "bbox": [ + 314, + 666, + 559, + 710 + ], + "type": "inline_equation", + "content": "p_i" + }, + { + "bbox": [ + 314, + 666, + 559, + 710 + ], + "type": "text", + "content": " followed by the final answer " + }, + { + "bbox": [ + 314, + 666, + 559, + 710 + ], + "type": "inline_equation", + "content": "a_i" + }, + { + "bbox": [ + 314, + 666, + 559, + 710 + ], + "type": "text", + "content": ". The reasoning process and answer are enclosed within " + }, + { + "bbox": [ + 314, + 666, + 559, + 710 + ], + "type": "inline_equation", + "content": "\\text{<think></think>}
" + }, + { + "bbox": [ + 314, + 666, + 559, + 710 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 314, + 666, + 559, + 710 + ], + "type": "inline_equation", + "content": "
" + }, + { + "bbox": [ + 314, + 666, + 559, + 710 + ], + "type": "text", + "content": " tags, respectively:" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 65, + 89, + 284, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 89, + 284, + 210 + ], + "spans": [ + { + "bbox": [ + 65, + 89, + 284, + 210 + ], + "type": "text", + "content": "Please assume the role of an agent. Given a question and a series of frames, you should first think about the reasoning process in the mind and then provide the final answer. The reasoning process and answer are enclosed within and tags, respectively, i.e., reasoning process here answer here . Ensure that your answer is consistent with and directly derived from your thinking process, maintaining logical coherence between the two sections. The frames represent your egocentric observations from the past to the present. Question: q. Video: f'. Assistant:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 51, + 222, + 295, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 222, + 295, + 246 + ], + "spans": [ + { + "bbox": [ + 51, + 222, + 295, + 246 + ], + "type": "text", + "content": "A regular expression is applied to evaluate whether " + }, + { + "bbox": [ + 51, + 222, + 295, + 246 + ], + "type": "inline_equation", + "content": "o_i" + }, + { + "bbox": [ + 51, + 222, + 295, + 246 + ], + "type": "text", + "content": " meets the specified requirements, thereby generating the format reward " + }, + { + "bbox": [ + 51, + 222, + 295, + 246 + ], + "type": "inline_equation", + "content": "r_i'" + }, + { + "bbox": [ + 51, + 222, + 295, + 246 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 112, + 251, + 295, + 280 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 251, + 295, + 280 + ], + "spans": [ + { + "bbox": [ + 112, + 251, + 295, + 280 + ], + "type": "interline_equation", + "content": "r _ {i} ^ {\\prime} = \\left\\{ \\begin{array}{l l} 1, & \\text {i f f o r m a t i s c o r r e c t ;} \\\\ 0, & \\text {i f f o r m a t i s i n c o r r e c t .} \\end{array} \\right. \\tag {4}", + "image_path": "4cf39cf0bb183bad791748ef428042ca4b82baf13f46cb79e52f0b671ac61e5b.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 283, + 296, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 283, + 296, + 338 + ], + "spans": [ + { + "bbox": [ + 50, + 283, + 296, + 338 + ], + "type": "text", + "content": "Accuracy Reward: The accuracy reward " + }, + { + "bbox": [ + 50, + 283, + 296, + 338 + ], + "type": "inline_equation", + "content": "r_i^{\\prime \\prime}" + }, + { + "bbox": [ + 50, + 283, + 296, + 338 + ], + "type": "text", + "content": " model assesses whether the answer " + }, + { + "bbox": [ + 50, + 283, + 296, + 338 + ], + "type": "inline_equation", + "content": "a_i" + }, + { + "bbox": [ + 50, + 283, + 296, + 338 + ], + "type": "text", + "content": " is semantically consistent with the ground truth " + }, + { + "bbox": [ + 50, + 283, + 296, + 338 + ], + "type": "inline_equation", + "content": "g" + }, + { + "bbox": [ + 50, + 283, + 296, + 338 + ], + "type": "text", + "content": ". For example, multiple-choice questions typically have precise and unique answers, which can be easily extracted when the response adheres to the specified format." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 137, + 342, + 295, + 372 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 342, + 295, + 372 + ], + "spans": [ + { + "bbox": [ + 137, + 342, + 295, + 372 + ], + "type": "interline_equation", + "content": "r_i^{\\prime\\prime} = \\left\\{ \\begin{array}{ll} 1, & a_i = g; \\\\ 0, & a_i \\neq g. \\end{array} \\right. \\tag{5}", + "image_path": "ced17e7750b58424a1d929303c5985c60e050fc2f686bc41dfd7e71d7563fb33.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 376, + 295, + 530 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 376, + 295, + 530 + ], + "spans": [ + { + "bbox": [ + 50, + 376, + 295, + 530 + ], + "type": "text", + "content": "Logical Consistency Reward: When using only the format reward and accuracy reward, we consistently observed hacking behaviors. Specifically, for spatial reasoning tasks where the possible answers are limited (e.g., the relative position of an object with respect to the agent's body), cases arise where an incorrect reasoning process " + }, + { + "bbox": [ + 50, + 376, + 295, + 530 + ], + "type": "inline_equation", + "content": "p_i" + }, + { + "bbox": [ + 50, + 376, + 295, + 530 + ], + "type": "text", + "content": " leads to a correct answer " + }, + { + "bbox": [ + 50, + 376, + 295, + 530 + ], + "type": "inline_equation", + "content": "a_i" + }, + { + "bbox": [ + 50, + 376, + 295, + 530 + ], + "type": "text", + "content": ", which is mistakenly assigned a positive reward. As such cases accumulate, the logical consistency of the model's responses deteriorates. To address this issue, we introduce a simple yet effective process reward. Our goal is to ensure a lower bound on logical consistency, such that the reasoning ability of " + }, + { + "bbox": [ + 50, + 376, + 295, + 530 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 50, + 376, + 295, + 530 + ], + "type": "text", + "content": " should not degrade below that of the reference model " + }, + { + "bbox": [ + 50, + 376, + 295, + 530 + ], + "type": "inline_equation", + "content": "\\pi_{\\mathrm{ref}}" + }, + { + "bbox": [ + 50, + 376, + 295, + 530 + ], + "type": "text", + "content": ". Therefore, when the model's answer is correct " + }, + { + "bbox": [ + 50, + 376, + 295, + 530 + ], + "type": "inline_equation", + "content": "(a_i = g)" + }, + { + "bbox": [ + 50, + 376, + 295, + 530 + ], + "type": "text", + "content": ", we input the question " + }, + { + "bbox": [ + 50, + 376, + 295, + 530 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 50, + 376, + 295, + 530 + ], + "type": "text", + "content": " and reasoning process " + }, + { + "bbox": [ + 50, + 376, + 295, + 530 + ], + "type": "inline_equation", + "content": "p_i" + }, + { + "bbox": [ + 50, + 376, + 295, + 530 + ], + "type": "text", + "content": " into the reference model without providing video frames, yielding an answer:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 137, + 535, + 294, + 548 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 535, + 294, + 548 + ], + "spans": [ + { + "bbox": [ + 137, + 535, + 294, + 548 + ], + "type": "interline_equation", + "content": "a_i^{\\prime} \\sim \\pi_{\\mathrm{ref}}(a \\mid q, p_i). 
\\tag {6}", + "image_path": "e7bf82e822e7498de48ef562eb3c67e6695560386cac202714327de0b66f57cf.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 552, + 295, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 552, + 295, + 586 + ], + "spans": [ + { + "bbox": [ + 50, + 552, + 295, + 586 + ], + "type": "text", + "content": "If " + }, + { + "bbox": [ + 50, + 552, + 295, + 586 + ], + "type": "inline_equation", + "content": "a_i'" + }, + { + "bbox": [ + 50, + 552, + 295, + 586 + ], + "type": "text", + "content": " is consistent with " + }, + { + "bbox": [ + 50, + 552, + 295, + 586 + ], + "type": "inline_equation", + "content": "a_i" + }, + { + "bbox": [ + 50, + 552, + 295, + 586 + ], + "type": "text", + "content": ", it indicates that the reasoning process can logically lead to the answer; otherwise, it reflects a logical inconsistency between the reasoning process and the answer." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 127, + 591, + 295, + 619 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 591, + 295, + 619 + ], + "spans": [ + { + "bbox": [ + 127, + 591, + 295, + 619 + ], + "type": "interline_equation", + "content": "r _ {i} ^ {\\prime \\prime \\prime} = \\left\\{ \\begin{array}{l l} 1, & a _ {i} = a _ {i} ^ {\\prime} = g; \\\\ 0, & \\text {e l s e .} \\end{array} \\right. \\tag {7}", + "image_path": "df726cf50d053a9e9d1c8e05f446d5f41a73699eca043c6c69addb5422e88baa.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 624, + 295, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 624, + 295, + 645 + ], + "spans": [ + { + "bbox": [ + 51, + 624, + 295, + 645 + ], + "type": "text", + "content": "Total Reward: The total reward is a linear combination of the three rewards mentioned above:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 123, + 651, + 295, + 665 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 651, + 295, + 665 + ], + "spans": [ + { + "bbox": [ + 123, + 651, + 295, + 665 + ], + "type": "interline_equation", + "content": "r _ {i} = \\omega_ {1} r _ {i} ^ {\\prime} + \\omega_ {2} r _ {i} ^ {\\prime \\prime} + \\omega_ {3} r _ {i} ^ {\\prime \\prime \\prime}. \\tag {8}", + "image_path": "e375db0edc369c6333853ce6daa2a93a25cdd6f89e88aa53d0a478663169f89c.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 673, + 134, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 673, + 134, + 685 + ], + "spans": [ + { + "bbox": [ + 51, + 673, + 134, + 685 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 687, + 296, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 687, + 296, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 687, + 296, + 710 + ], + "type": "text", + "content": "We first provide the details of the experimental setup and then demonstrate the following: quantitative results, qualitative results," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 84, + 559, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 84, + 559, + 107 + ], + "spans": [ + { + "bbox": [ + 314, + 84, + 559, + 107 + ], + "type": "text", + "content": "and ablation studies. 
These correspond to addressing the following three research questions (RQs):" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 109, + 559, + 152 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 315, + 109, + 559, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 109, + 559, + 129 + ], + "spans": [ + { + "bbox": [ + 315, + 109, + 559, + 129 + ], + "type": "text", + "content": "- RQ1: How does Embodied-R perform compared to existing video-LLMs?" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 130, + 515, + 141 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 130, + 515, + 141 + ], + "spans": [ + { + "bbox": [ + 315, + 130, + 515, + 141 + ], + "type": "text", + "content": "- RQ2: Has Embodied-R learned slow-thinking?" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 315, + 142, + 528, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 142, + 528, + 152 + ], + "spans": [ + { + "bbox": [ + 315, + 142, + 528, + 152 + ], + "type": "text", + "content": "- RQ3: What are the contributions of each module?" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 163, + 441, + 176 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 163, + 441, + 176 + ], + "spans": [ + { + "bbox": [ + 315, + 163, + 441, + 176 + ], + "type": "text", + "content": "4.1 Experimental Setup" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 178, + 559, + 440 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 178, + 559, + 440 + ], + "spans": [ + { + "bbox": [ + 313, + 178, + 559, + 440 + ], + "type": "text", + "content": "4.1.1 Data Preparation. We primarily focus on spatial reasoning problems during motion within three-dimensional physical space to evaluate the effectiveness of our method. For this purpose, we selected two embodied video datasets as the main training and testing sets: VSI-Bench [58], which contains indoor first-person navigation data, and UrbanVideo-Bench [62], which consists of outdoor embodied data captured by drones navigating through aerial spaces. These datasets provide diversity in scenarios by incorporating both outdoor and indoor video data. Based on the content of the tasks, we specifically selected four distinct types of tasks from each dataset, characterized by long spatial reasoning chains and low accuracy. These tasks are formulated as multiple-choice question-answering problems, ensuring determinism in answers to facilitate RL training and allowing direct calculation of accuracy to evaluate performance. Across eight task categories, the dataset covers multiple levels of spatial reasoning, comprising a total of 5,415 QA pairs and 1,492 videos. Additionally, we include two out-of-distribution datasets, EgoSchema [34] and the Egocentric task in MVBench [27]. EgoSchema is designed for task-level reasoning from a first-person perspective, with 500 QA pairs and 500 videos available in its fully open-source portion. MVBench encompasses the embodied task of egocentric navigation, comprising 200 QA pairs and 200 corresponding videos. These datasets serve to evaluate the generalization capability of the trained model." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 441, + 559, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 441, + 559, + 638 + ], + "spans": [ + { + "bbox": [ + 313, + 441, + 559, + 638 + ], + "type": "text", + "content": "To ensure comprehensive evaluation, we conducted five repeated experiments. The dataset was randomly divided into five equal parts, and 5-fold cross-validation was adopted. The final testing results are averaged across the five experiments. Furthermore, we address the issue of potential semantic bias in the datasets. For instance, in action generation tasks, forward movement may inherently have a higher correctness rate than adjusting the gimbal angle, which is a characteristic of the task itself. To prevent the testing performance from being influenced by the model learning the textual distribution rather than truly understanding the spatial information in the video, we implement an additional filtering step for the testing set. Specifically, we train an LLM through supervised fine-tuning using only the textual QA pairs from the training set, without video inputs. If a question in the testing set can be correctly answered by the fine-tuned LLM but not by the original LLM, it indicates semantic bias in that QA pair. These biased QA pairs are excluded from the testing set as they fail to accurately assess the spatial reasoning capabilities of models." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 314, + 643, + 561, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 643, + 561, + 710 + ], + "spans": [ + { + "bbox": [ + 314, + 643, + 561, + 710 + ], + "type": "text", + "content": "4.1.2 Implementation Details. We use Qwen2.5-3B-Instruct [57] as the small-scale LM and Qwen2.5-VL-72B-Instruct [6] as the large-scale VLM. Both training and inference processes were conducted using 8 NVIDIA A800-SXM4-40GB GPUs, with each RL training requiring approximately 90 GPU hours. Other key hyperparameters for training are as follows: learning rate: 5e-7, temperature:" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 52, + 125, + 366, + 482 + ], + "blocks": [ + { + "bbox": [ + 50, + 82, + 560, + 118 + ], + "lines": [ + { + "bbox": [ + 50, + 82, + 560, + 118 + ], + "spans": [ + { + "bbox": [ + 50, + 82, + 560, + 118 + ], + "type": "text", + "content": "Table 1: Accuracy of Embodied-R and baselines on 8 indoor and outdoor embodied spatial reasoning tasks. The baselines include popular proprietary models, state-of-the-art (SOTA) multimodal reasoning models, open-sourced video-large language models, and models fine-tuned on the same training dataset." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 52, + 125, + 366, + 482 + ], + "lines": [ + { + "bbox": [ + 52, + 125, + 366, + 482 + ], + "spans": [ + { + "bbox": [ + 52, + 125, + 366, + 482 + ], + "type": "table", + "html": "
<table><thead><tr><th rowspan="2">Method</th><th rowspan="2">Avg.</th><th colspan="4">UrbanVideo-Bench</th><th colspan="4">VSI-Bench</th></tr>
<tr><th>Landmark Position</th><th>Counterfactual</th><th>Progress Evaluation</th><th>Action Generation</th><th>Relative Distance</th><th>Relative Direction</th><th>Route Planning</th><th>Appearance Order</th></tr></thead>
<tbody><tr><td>Random</td><td>24.0</td><td>19.7</td><td>25.0</td><td>21.8</td><td>16.4</td><td>25.0</td><td>36.1</td><td>28.3</td><td>25.0</td></tr>
<tr><td colspan="10">Proprietary Models (API)</td></tr>
<tr><td>Qwen-VL-Max [32f]</td><td>34.1</td><td>44.8</td><td>49.2</td><td>38.8</td><td>29.6</td><td>28.0</td><td>33.3</td><td>29.6</td><td>28.3</td></tr>
<tr><td>GPT-4o [32f]</td><td>35.7</td><td>36.8</td><td>44.7</td><td>34.2</td><td>33.8</td><td>37.0</td><td>41.3</td><td>31.5</td><td>28.5</td></tr>
<tr><td>Gemini-1.5-Flash [1fps]</td><td>38.3</td><td>37.8</td><td>42.4</td><td>43.3</td><td>34.4</td><td>37.7</td><td>41.0</td><td>31.5</td><td>37.8</td></tr>
<tr><td>Gemini-1.5-Pro [1fps]</td><td>39.7</td><td>37.4</td><td>46.2</td><td>38.8</td><td>31.9</td><td>51.3</td><td>46.3</td><td>36.0</td><td>34.6</td></tr>
<tr><td colspan="10">SOTA Reasoning Models (API)</td></tr>
<tr><td>OpenAI-o1 [32f]</td><td>37.2</td><td>34.6</td><td>53.3</td><td>39.1</td><td>28.0</td><td>39.7</td><td>35.8</td><td>52.9</td><td>39.8</td></tr>
<tr><td>Gemini-2.5-Pro [1fps]</td><td>40.8</td><td>40.0</td><td>75.0</td><td>38.7</td><td>23.5</td><td>42.0</td><td>34.5</td><td>52.4</td><td>63.6</td></tr>
<tr><td colspan="10">Open-source Models</td></tr>
<tr><td>LLaVA-NeXT-Video-7B-hf [32f]</td><td>29.5</td><td>49.5</td><td>20.5</td><td>36.6</td><td>19.2</td><td>25.2</td><td>26.3</td><td>29.9</td><td>24.5</td></tr>
<tr><td>Phi-3.5-vision-instruct [32f]</td><td>29.0</td><td>49.2</td><td>34.8</td><td>33.2</td><td>15.6</td><td>25.4</td><td>26.5</td><td>36.9</td><td>25.2</td></tr>
<tr><td>Kangaroo [64f]</td><td>30.0</td><td>35.5</td><td>42.4</td><td>32.5</td><td>32.4</td><td>25.2</td><td>26.8</td><td>23.5</td><td>24.9</td></tr>
<tr><td>InternVL2-2B [32f]</td><td>24.5</td><td>19.3</td><td>45.5</td><td>29.2</td><td>20.9</td><td>25.1</td><td>25.0</td><td>32.6</td><td>23.9</td></tr>
<tr><td>InternVL2-8B [32f]</td><td>25.5</td><td>23.1</td><td>45.5</td><td>31.5</td><td>21.4</td><td>24.7</td><td>25.7</td><td>28.3</td><td>24.8</td></tr>
<tr><td>InternVL2-40B [32f]</td><td>25.8</td><td>23.2</td><td>41.7</td><td>32.4</td><td>22.3</td><td>24.9</td><td>25.7</td><td>29.4</td><td>24.5</td></tr>
<tr><td>Qwen2.5-VL-3B-Instruct [1fps]</td><td>33.1</td><td>32.1</td><td>47.8</td><td>34.0</td><td>31.0</td><td>27.9</td><td>32.6</td><td>39.0</td><td>38.9</td></tr>
<tr><td>Qwen2.5-VL-7B-Instruct [1fps]</td><td>33.3</td><td>33.3</td><td>21.7</td><td>25.0</td><td>27.8</td><td>35.8</td><td>39.7</td><td>48.8</td><td>38.8</td></tr>
<tr><td>Qwen2.5-VL-72B-Instruct [1fps]</td><td>34.9</td><td>34.7</td><td>34.8</td><td>26.4</td><td>37.7</td><td>40.8</td><td>29.0</td><td>32.5</td><td>43.9</td></tr>
<tr><td colspan="10">Supervised Fine-Tuning</td></tr>
<tr><td>Qwen2.5-VL-3B-Instruct [1fps]</td><td>41.7</td><td>47.7</td><td>33.4</td><td>34.8</td><td>39.2</td><td>42.6</td><td>42.3</td><td>41.2</td><td>43.9</td></tr>
<tr><td>Qwen2.5-VL-7B-Instruct [1fps]</td><td>45.4</td><td>40.2</td><td>53.4</td><td>38.0</td><td>40.8</td><td>47.8</td><td>46.3</td><td>44.1</td><td>56.1</td></tr>
<tr><td colspan="10">Proposed Embodied-R</td></tr>
<tr><td>VLM-72B + LLM-3B [≤32f]</td><td>51.1</td><td>55.1</td><td>59.9</td><td>39.7</td><td>47.6</td><td>50.0</td><td>44.3</td><td>36.8</td><td>72.0</td></tr></tbody></table>
", + "image_path": "07b7a0b7ff464267e1f94ac8af3cecb76bbbacb0d09b986b837e1427bcd0b037.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 370, + 125, + 563, + 275 + ], + "blocks": [ + { + "bbox": [ + 370, + 125, + 563, + 275 + ], + "lines": [ + { + "bbox": [ + 370, + 125, + 563, + 275 + ], + "spans": [ + { + "bbox": [ + 370, + 125, + 563, + 275 + ], + "type": "image", + "image_path": "9c8f658d66e081cf723488b6f76f91ee89abd848c964038024d4a57588ef7cd3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 378, + 285, + 550, + 330 + ], + "blocks": [ + { + "bbox": [ + 378, + 285, + 550, + 330 + ], + "lines": [ + { + "bbox": [ + 378, + 285, + 550, + 330 + ], + "spans": [ + { + "bbox": [ + 378, + 285, + 550, + 330 + ], + "type": "table", + "html": "
<table><tr><td>GPT-4o</td><td>Qwen2.5-VL-72B</td></tr>
<tr><td>OpenAI-o1</td><td>Qwen2.5-VL-3B</td></tr>
<tr><td>Gemini-1.5-Pro</td><td>Qwen2.5-VL-3B-SFT</td></tr>
<tr><td>InternVL2-40B</td><td>Embodied-R</td></tr></table>
", + "image_path": "bee8e503247e6bfc518e9e846c8a4b872251751a5d9ae2c860f2d80f79ade5f7.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 374, + 357, + 556, + 405 + ], + "blocks": [ + { + "bbox": [ + 380, + 343, + 550, + 354 + ], + "lines": [ + { + "bbox": [ + 380, + 343, + 550, + 354 + ], + "spans": [ + { + "bbox": [ + 380, + 343, + 550, + 354 + ], + "type": "text", + "content": "Table 2: Ablation of Key-Frame Extractor" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 374, + 357, + 556, + 405 + ], + "lines": [ + { + "bbox": [ + 374, + 357, + 556, + 405 + ], + "spans": [ + { + "bbox": [ + 374, + 357, + 556, + 405 + ], + "type": "table", + "html": "
<table><thead><tr><th></th><th>Avg. Frame</th><th>Acc.</th><th>Training Time</th><th>Inference Time</th></tr></thead>
<tbody><tr><td>w/o</td><td>32</td><td>51.1</td><td>127.87 h</td><td>243.68 s</td></tr>
<tr><td>w</td><td>20.7 (↓11.3)</td><td>49.5 (↓1.6)</td><td>111.70 h (↓16.17)</td><td>157.55 s (↓86.13)</td></tr></tbody></table>
", + "image_path": "5ff31b3efbfea499b28ba5da6236251848769ad72fedf064dac36cb1cd14bf7b.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 370, + 434, + 561, + 478 + ], + "blocks": [ + { + "bbox": [ + 394, + 418, + 537, + 429 + ], + "lines": [ + { + "bbox": [ + 394, + 418, + 537, + 429 + ], + "spans": [ + { + "bbox": [ + 394, + 418, + 537, + 429 + ], + "type": "text", + "content": "Table 3: Ablation of Collaboration." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 370, + 434, + 561, + 478 + ], + "lines": [ + { + "bbox": [ + 370, + 434, + 561, + 478 + ], + "spans": [ + { + "bbox": [ + 370, + 434, + 561, + 478 + ], + "type": "table", + "html": "
<table><thead><tr><th></th><th>Avg.</th><th>LP</th><th>C</th><th>PE</th><th>AG</th><th>RDist</th><th>RDir</th><th>RP</th><th>AO</th></tr></thead>
<tbody><tr><td>w/o</td><td>34.8</td><td>31.8</td><td>45.7</td><td>28.3</td><td>28.1</td><td>41.0</td><td>29.7</td><td>37.5</td><td>46.0</td></tr>
<tr><td>w</td><td>51.1</td><td>55.1</td><td>59.9</td><td>39.7</td><td>47.6</td><td>50.0</td><td>44.3</td><td>36.8</td><td>72.0</td></tr>
<tr><td>Δ</td><td>+16.3</td><td>+23.3</td><td>+14.2</td><td>+11.4</td><td>+19.5</td><td>+9.0</td><td>+14.6</td><td>-0.7</td><td>+26.0</td></tr></tbody></table>
", + "image_path": "d3863b9148a96e8d65b704fc34cde1fe3cea544d324697581654052ba2668f19.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 496, + 295, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 496, + 295, + 531 + ], + "spans": [ + { + "bbox": [ + 50, + 496, + 295, + 531 + ], + "type": "text", + "content": "1.0, train batch size: 32, rollout size: 8, KL coefficient: 0.001, maximum response length: 2048, input length: 6144. When conducting inference on the test set, the temperature is set to 0.5." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 539, + 295, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 539, + 295, + 594 + ], + "spans": [ + { + "bbox": [ + 50, + 539, + 295, + 594 + ], + "type": "text", + "content": "4.1.3 Three-Stage Training Schedule. As for the RL training on the LM, we design a three-stage training schedule to achieve a smooth improvement in training performance. The primary distinction between stages lies in the different weight ratios assigned to three types of rewards." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 600, + 295, + 709 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 51, + 600, + 295, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 600, + 295, + 666 + ], + "spans": [ + { + "bbox": [ + 51, + 600, + 295, + 666 + ], + "type": "text", + "content": "- Stage 1: In epochs 1 and 2, the goal is to guide the model to follow the \"\" output format. At this stage, the weights are set as " + }, + { + "bbox": [ + 51, + 600, + 295, + 666 + ], + "type": "inline_equation", + "content": "\\omega_{1}:\\omega_{2}:\\omega_{3} = 7:3:0" + }, + { + "bbox": [ + 51, + 600, + 295, + 666 + ], + "type": "text", + "content": ". Correct format rewards also assist in locating the answer and reduce misjudgment in accuracy. During this phase, the format reward rapidly converges to 1." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 666, + 294, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 666, + 294, + 709 + ], + "spans": [ + { + "bbox": [ + 51, + 666, + 294, + 709 + ], + "type": "text", + "content": "- Stage 2: In epochs 3 and 4, the focus shifts to improving the accuracy of the model's responses, guiding the model to produce correct reasoning answers. The weights are set as " + }, + { + "bbox": [ + 51, + 666, + 294, + 709 + ], + "type": "inline_equation", + "content": "\\omega_{1}:\\omega_{2}:\\omega_{3} = 3:7:0" + }, + { + "bbox": [ + 51, + 666, + 294, + 709 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 497, + 560, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 497, + 560, + 541 + ], + "spans": [ + { + "bbox": [ + 315, + 497, + 560, + 541 + ], + "type": "text", + "content": "- Stage 3: In subsequent 5-12 epochs, the aim is to enhance accuracy while simultaneously improving the quality of the \"thinking\" process, ensuring logical consistency between thinking and the answer. The weights are set as " + }, + { + "bbox": [ + 315, + 497, + 560, + 541 + ], + "type": "inline_equation", + "content": "\\omega_{1}:\\omega_{2}:\\omega_{3} = 1:7:2" + }, + { + "bbox": [ + 315, + 497, + 560, + 541 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 314, + 559, + 556, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 559, + 556, + 586 + ], + "spans": [ + { + "bbox": [ + 314, + 559, + 556, + 586 + ], + "type": "text", + "content": "4.2 How Does Embodied-R Perform Compared to Existing Video-LLMs?" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 588, + 559, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 588, + 559, + 632 + ], + "spans": [ + { + "bbox": [ + 314, + 588, + 559, + 632 + ], + "type": "text", + "content": "To evaluate the effectiveness of the proposed method, in addition to the random baseline, we introduced four categories comprising 17 multimodal large language models capable of processing video inputs:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 315, + 643, + 560, + 710 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 315, + 643, + 560, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 643, + 560, + 676 + ], + "spans": [ + { + "bbox": [ + 315, + 643, + 560, + 676 + ], + "type": "text", + "content": "- Proprietary Models: Cost-effective multimodal models with over 100B parameters, including Qwen-VL-Max [46], GPT-4o [37], Gemini-1.5-Flash [44], and Gemini-1.5-Pro [44]." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 677, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 677, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 315, + 677, + 559, + 710 + ], + "type": "text", + "content": "- SOTA Reasoning Models: State-of-the-art reasoning models with the highest performance but significant computational cost, including OpenAI-o1 [38] and Gemini-2.5-Pro [21]." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 65, + 91, + 302, + 285 + ], + "blocks": [ + { + "bbox": [ + 65, + 91, + 302, + 285 + ], + "lines": [ + { + "bbox": [ + 65, + 91, + 302, + 285 + ], + "spans": [ + { + "bbox": [ + 65, + 91, + 302, + 285 + ], + "type": "image", + "image_path": "c318cdb8f4090e0a3adc8de0a1fd78af3544fb0478b79abbbdcf108a3971a0d2.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 289, + 560, + 322 + ], + "lines": [ + { + "bbox": [ + 50, + 289, + 560, + 322 + ], + "spans": [ + { + "bbox": [ + 50, + 289, + 560, + 322 + ], + "type": "text", + "content": "Figure 3: Case Analysis: Embodied-R has initially developed the ability for slow-thinking: it can think before answering, effectively distinguish spatial relationships, provide structured and organized responses, and integrate information across multiple frames for embodied scene analysis." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 306, + 91, + 542, + 285 + ], + "blocks": [ + { + "bbox": [ + 306, + 91, + 542, + 285 + ], + "lines": [ + { + "bbox": [ + 306, + 91, + 542, + 285 + ], + "spans": [ + { + "bbox": [ + 306, + 91, + 542, + 285 + ], + "type": "image", + "image_path": "2b9e5f7a41386ace07624dec3ec03897c00121bef7fd942a2b707bf0ce1c5755.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 66, + 331, + 279, + 456 + ], + "blocks": [ + { + "bbox": [ + 66, + 331, + 279, + 456 + ], + "lines": [ + { + "bbox": [ + 66, + 331, + 279, + 456 + ], + "spans": [ + { + "bbox": [ + 66, + 331, + 279, + 456 + ], + "type": "image", + "image_path": "62b0a49ba975743ab2da724498d96bedd02c9a57a3ea5856833650e9fef3dfdb.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 463, + 295, + 485 + ], + "lines": [ + { + "bbox": [ + 50, + 463, + 295, + 485 + ], + "spans": [ + { + "bbox": [ + 50, + 463, + 295, + 485 + ], + "type": "text", + "content": "Figure 4: Ablation of RL training and comparison to other language models." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 51, + 494, + 306, + 710 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 51, + 494, + 306, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 494, + 306, + 527 + ], + "spans": [ + { + "bbox": [ + 51, + 494, + 306, + 527 + ], + "type": "text", + "content": "- Open-Source Models: Popular open-source multimodal models, including LLaVA-NeXT-Video-7B-hf [29], Phi-3.5-vision-instruct [1], the InternVL2 series [11], and the Qwen-VL series [6]." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 527, + 295, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 527, + 295, + 586 + ], + "spans": [ + { + "bbox": [ + 51, + 527, + 295, + 586 + ], + "type": "text", + "content": "- Supervised Fine-Tuning (SFT): Considering the scarcity of embodied video tasks, the aforementioned models may lack exposure to relevant data. Therefore, Qwen2.5-VL-3B-Instruct [6] and Qwen2.5-VL-7B-Instruct [6] are fine-tuned for these tasks. The results presented in Table 1 lead to the following conclusions:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 589, + 296, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 589, + 296, + 710 + ], + "spans": [ + { + "bbox": [ + 51, + 589, + 296, + 710 + ], + "type": "text", + "content": "- After undergoing RL training on embodied reasoning tasks, our model significantly outperformed proprietary models as well as OpenAI-o1 and Gemini-2.5-Pro by over " + }, + { + "bbox": [ + 51, + 589, + 296, + 710 + ], + "type": "inline_equation", + "content": "10\\%" + }, + { + "bbox": [ + 51, + 589, + 296, + 710 + ], + "type": "text", + "content": ". Moreover, it consistently demonstrated leading performance across various tasks. These results highlight the considerable difficulty of embodied reasoning tasks and indicate that current reasoning models lack generalization capability for such spatial reasoning challenges. On the other hand, the findings confirm that the collaborative framework with RL can effectively enhance model reasoning performance in specific domains, especially for tasks that remain poorly solved." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 334, + 561, + 477 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 315, + 334, + 560, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 334, + 560, + 421 + ], + "spans": [ + { + "bbox": [ + 315, + 334, + 560, + 421 + ], + "type": "text", + "content": "- For embodied video reasoning, a highly coupled perception-reasoning problem, the VLM Qwen2.5-VL-72B-Instruct achieved an accuracy of only " + }, + { + "bbox": [ + 315, + 334, + 560, + 421 + ], + "type": "inline_equation", + "content": "34.9\\%" + }, + { + "bbox": [ + 315, + 334, + 560, + 421 + ], + "type": "text", + "content": " through direct inference. In contrast, incorporating a small-scale LM improved accuracy to " + }, + { + "bbox": [ + 315, + 334, + 560, + 421 + ], + "type": "inline_equation", + "content": "51.1\\%" + }, + { + "bbox": [ + 315, + 334, + 560, + 421 + ], + "type": "text", + "content": ". Given limited computational resources for training, the collaborative framework proposed in this study provides an effective solution for balancing model size with hardware constraints." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 315, + 422, + 561, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 422, + 561, + 477 + ], + "spans": [ + { + "bbox": [ + 315, + 422, + 561, + 477 + ], + "type": "text", + "content": "- Under similar computational resource limitations, direct fine-tuning is restricted to models with a size of 7B or smaller. However, the perceptual capacity of small-scale VL models imposes a low upper bound on accuracy compared to Embodied-R. Additionally, fine-tuned models lack the capability for slow-thinking." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 496, + 550, + 509 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 496, + 550, + 509 + ], + "spans": [ + { + "bbox": [ + 315, + 496, + 550, + 509 + ], + "type": "text", + "content": "4.3 Has Embodied-R Learned Slow-Thinking?" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 510, + 560, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 510, + 560, + 555 + ], + "spans": [ + { + "bbox": [ + 314, + 510, + 560, + 555 + ], + "type": "text", + "content": "Beyond the quantitative results, we aim to explore whether the spatial reasoning expressed in Embodied-R's outputs has improved. As illustrated in Figure 3, after RL training, Embodied-R demonstrates the following human-like reasoning patterns:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 566, + 564, + 676 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 315, + 566, + 559, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 566, + 559, + 597 + ], + "spans": [ + { + "bbox": [ + 315, + 566, + 559, + 597 + ], + "type": "text", + "content": "- Spatial Relationship Reasoning: Accurately inferring the relative spatial relationship between itself and the surrounding environment." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 599, + 559, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 599, + 559, + 632 + ], + "spans": [ + { + "bbox": [ + 315, + 599, + 559, + 632 + ], + "type": "text", + "content": "- Systematic Analysis: Breaking down problems into components, presenting answers with a \"part-to-whole\" structure, and maintaining clear logical organization." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 315, + 632, + 564, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 632, + 564, + 654 + ], + "spans": [ + { + "bbox": [ + 315, + 632, + 564, + 654 + ], + "type": "text", + "content": "- Contextual Integration: Integrating semantic information across different frames to perform comprehensive analysis." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 654, + 559, + 676 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 654, + 559, + 676 + ], + "spans": [ + { + "bbox": [ + 315, + 654, + 559, + 676 + ], + "type": "text", + "content": "- Think-Answer Format: Strictly adhering to a structured process of reasoning before outputting the final answer." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 314, + 687, + 560, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 687, + 560, + 710 + ], + "spans": [ + { + "bbox": [ + 314, + 687, + 560, + 710 + ], + "type": "text", + "content": "In summary, Embodied-R demonstrates a certain degree of slow-thinking capability in embodied spatial reasoning." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 56, + 87, + 181, + 187 + ], + "blocks": [ + { + "bbox": [ + 56, + 87, + 181, + 187 + ], + "lines": [ + { + "bbox": [ + 56, + 87, + 181, + 187 + ], + "spans": [ + { + "bbox": [ + 56, + 87, + 181, + 187 + ], + "type": "image", + "image_path": "b72f62ac79b3b4ecd587477fe5880cff8321027484d74e9a4a7c196167a6ee4f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 181, + 86, + 304, + 187 + ], + "blocks": [ + { + "bbox": [ + 181, + 86, + 304, + 187 + ], + "lines": [ + { + "bbox": [ + 181, + 86, + 304, + 187 + ], + "spans": [ + { + "bbox": [ + 181, + 86, + 304, + 187 + ], + "type": "image", + "image_path": "02393c833bbae8e6c12fcf40fb48a9b3a5d87263d5f9abc11d381f176dd35a56.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 304, + 87, + 430, + 188 + ], + "blocks": [ + { + "bbox": [ + 304, + 87, + 430, + 188 + ], + "lines": [ + { + "bbox": [ + 304, + 87, + 430, + 188 + ], + "spans": [ + { + "bbox": [ + 304, + 87, + 430, + 188 + ], + "type": "image", + "image_path": "77957a7d881be79b7f92f5a86858a4906ac4cfb5dbbbfe27cc3a19d2ebeec4a2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 430, + 86, + 558, + 188 + ], + "blocks": [ + { + "bbox": [ + 430, + 86, + 558, + 188 + ], + "lines": [ + { + "bbox": [ + 430, + 86, + 558, + 188 + ], + "spans": [ + { + "bbox": [ + 430, + 86, + 558, + 188 + ], + "type": "image", + "image_path": "5a7204fe5ebc0eb56147d5cc28c1831272e0043c1ada6eb035614ccd44df17a2.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + 
{ + "type": "image", + "bbox": [ + 54, + 193, + 194, + 304 + ], + "blocks": [ + { + "bbox": [ + 54, + 193, + 194, + 304 + ], + "lines": [ + { + "bbox": [ + 54, + 193, + 194, + 304 + ], + "spans": [ + { + "bbox": [ + 54, + 193, + 194, + 304 + ], + "type": "image", + "image_path": "bcdca8529b76556187765c9b5a87cb0912a7dc6dfab13b6c798397055f198039.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 50, + 314, + 559, + 357 + ], + "lines": [ + { + "bbox": [ + 50, + 314, + 559, + 357 + ], + "spans": [ + { + "bbox": [ + 50, + 314, + 559, + 357 + ], + "type": "text", + "content": "Figure 5: a-d. The GRPO training process (a: accuracy reward; b: format reward; c: ratio of logical consistency reward to accuracy reward; d: response length of validation set). e. Comparison of accuracy reward curves for RL training of equivalently sized LM and VLM models. f. Model performance before and after integrating logical consistency reward. g. Comparison of generalization performance between models trained with RL and SFT." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 195, + 191, + 386, + 304 + ], + "blocks": [ + { + "bbox": [ + 195, + 191, + 386, + 304 + ], + "lines": [ + { + "bbox": [ + 195, + 191, + 386, + 304 + ], + "spans": [ + { + "bbox": [ + 195, + 191, + 386, + 304 + ], + "type": "image", + "image_path": "a38e1a2a0590e90f4eb270befb537b4680adaf74a94c8e1149fbd2b6a5e1a4c2.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 391, + 193, + 557, + 304 + ], + "blocks": [ + { + "bbox": [ + 391, + 193, + 557, + 304 + ], + "lines": [ + { + "bbox": [ + 391, + 193, + 557, + 304 + ], + "spans": [ + { + "bbox": [ + 391, + 193, + 557, + 304 + ], + "type": "image", + "image_path": "5b0471981f3a1f19157135899e7655288f65f2ed892ff00e95c43e9724d27af4.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 364, + 229, + 374 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 364, + 229, + 374 + ], + "spans": [ + { + "bbox": [ + 51, + 364, + 229, + 374 + ], + "type": "text", + "content": "4.4 Contributions of Each Module" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 378, + 295, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 378, + 295, + 445 + ], + "spans": [ + { + "bbox": [ + 50, + 378, + 295, + 445 + ], + "type": "text", + "content": "4.4.1 Ablation of Key-Frame Extractor. The role of Key-Frame Extractor is to reduce inference time and training time by retaining essential frames and removing redundant ones while maintaining perceptual quality. As shown in Table 2, with negligible differences in accuracy, training time is significantly reduced by " + }, + { + "bbox": [ + 50, + 378, + 295, + 445 + ], + "type": "inline_equation", + "content": "8.7\\%" + }, + { + "bbox": [ + 50, + 378, + 295, + 445 + ], + "type": "text", + "content": ", and single inference time is reduced by approximately one-third." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 451, + 295, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 451, + 295, + 538 + ], + "spans": [ + { + "bbox": [ + 50, + 451, + 295, + 538 + ], + "type": "text", + "content": "4.4.2 Ablation of Collaboration. 
The collaborative framework enables improved reasoning capabilities under limited computational resources for training. With training-free large-scale pretrained VLMs, it only requires training small-scale LM models to achieve enhanced reasoning performance. As shown in Table 3, with identical key-frame inputs and using the same VLM, Qwen2.5-VL-72B-Instruct, the overall accuracy of collaborative inference is 1.5 times higher than that of the standalone VLM." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 545, + 295, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 545, + 295, + 620 + ], + "spans": [ + { + "bbox": [ + 50, + 545, + 295, + 620 + ], + "type": "text", + "content": "4.4.3 Ablation of RL Training. RL is central to the LM training in this paper. Without RL training, directly applying the original LM-3B model for reasoning leads to poor performance, as the LM has limited exposure to embodied spatial reasoning data during pretraining. After RL training, the LM achieves significant improvements, with a " + }, + { + "bbox": [ + 50, + 545, + 295, + 620 + ], + "type": "inline_equation", + "content": "27.9\\%" + }, + { + "bbox": [ + 50, + 545, + 295, + 620 + ], + "type": "text", + "content": " increase on the UrbanVideo-Bench and a " + }, + { + "bbox": [ + 50, + 545, + 295, + 620 + ], + "type": "inline_equation", + "content": "20.6\\%" + }, + { + "bbox": [ + 50, + 545, + 295, + 620 + ], + "type": "text", + "content": " increase on the VSI-Bench benchmarks." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 622, + 295, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 622, + 295, + 709 + ], + "spans": [ + { + "bbox": [ + 50, + 622, + 295, + 709 + ], + "type": "text", + "content": "Given that VLM has already transformed visual inputs into textual representations, we introduced 4 text-based reasoning models (o3-mini [39], Deepseek-R1 [24], Qwen-Max [46], Qwen2.5-7B-Instruct [6]) as baselines to further assess the importance of reasoning capability in the embodied spatial task. The results demonstrate a clear positive correlation between the reasoning ability of the model and its accuracy. The strong performance of Embodied-R may not only stem from its familiarity with the data distribution" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 314, + 365, + 559, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 365, + 559, + 408 + ], + "spans": [ + { + "bbox": [ + 314, + 365, + 559, + 408 + ], + "type": "text", + "content": "but also from its synergy with the representations provided by the VLM. Following training, the small-scale LM becomes more attuned to the VLM-generated representations, which translates into enhanced performance on embodied reasoning tasks." 
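The collaborative inference described in this ablation can be summarized as a two-stage pipeline: a large frozen VLM verbalizes the key-frames, and only the small RL-trained LM reasons over the resulting text. A minimal sketch under that reading; `vlm_describe` and `lm_generate` are hypothetical stand-ins for the model calls, not the paper's implementation:

```python
# Two-stage perception/reasoning split: the VLM is training-free, the small
# LM is the only trained component. Prompt wording here is illustrative.
def collaborative_inference(key_frames, question, vlm_describe, lm_generate):
    # Stage 1: perception -- the large VLM turns each key-frame into text.
    descriptions = [f"Frame {i}: {vlm_describe(f)}" for i, f in enumerate(key_frames)]
    # Stage 2: reasoning -- the small LM sees only text and must produce a
    # think-then-answer style response.
    prompt = "\n".join(descriptions) + f"\nQuestion: {question}\nReason step by step, then answer."
    return lm_generate(prompt)
```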
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 415, + 434, + 427 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 415, + 434, + 427 + ], + "spans": [ + { + "bbox": [ + 315, + 415, + 434, + 427 + ], + "type": "text", + "content": "5 Further Exploration" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 430, + 558, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 430, + 558, + 452 + ], + "spans": [ + { + "bbox": [ + 314, + 430, + 558, + 452 + ], + "type": "text", + "content": "Building upon the aforementioned experiments, we further explore four intriguing RQs related to embodied video-based RL training:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 315, + 459, + 559, + 523 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 315, + 459, + 559, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 459, + 559, + 480 + ], + "spans": [ + { + "bbox": [ + 315, + 459, + 559, + 480 + ], + "type": "text", + "content": "- RQ4: What Is the Relationship Between Inference Ability, Aha Moments, and Response Length?" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 481, + 557, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 481, + 557, + 491 + ], + "spans": [ + { + "bbox": [ + 315, + 481, + 557, + 491 + ], + "type": "text", + "content": "- RQ5: Why Not Directly Perform RL Training on VLLMs?" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 315, + 492, + 527, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 492, + 527, + 502 + ], + "spans": [ + { + "bbox": [ + 315, + 492, + 527, + 502 + ], + "type": "text", + "content": "- RQ6: Is Accuracy+Format Rewards All You Need?" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 315, + 503, + 558, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 503, + 558, + 523 + ], + "spans": [ + { + "bbox": [ + 315, + 503, + 558, + 523 + ], + "type": "text", + "content": "- RQ7: RL vs SFT when Generalizing to Out-of-Distribution (OOD) Embodied Tasks?" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 315, + 539, + 541, + 565 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 539, + 541, + 565 + ], + "spans": [ + { + "bbox": [ + 315, + 539, + 541, + 565 + ], + "type": "text", + "content": "5.1 Relationship Between Inference Ability, Aha Moments, and Response Length?" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 567, + 559, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 567, + 559, + 709 + ], + "spans": [ + { + "bbox": [ + 313, + 567, + 559, + 709 + ], + "type": "text", + "content": "The GRPO training process is illustrated in Figure 5a-d, which correspond to the validation set's accuracy reward, format reward, ratio of logical consistency reward to accuracy reward, and the response length, respectively. Notably, existing pure-text-based reproductions [55, 59] of DeepSeek-R1-Zero models identify inference ability and the \"aha moment\" as key indicators of emergent reasoning capabilities. However, such phenomena are rarely observed in other multimodal reasoning tasks, such as image-based reasoning [10, 33]. This leads us to hypothesize that response length is strongly influenced by the nature of the question itself. 
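The GRPO curves discussed here come from group-relative scoring: each question is sampled several times, and every response's advantage is its reward standardized against its own group. A minimal sketch of that standard GRPO formulation (not the authors' training code):

```python
import numpy as np

# Group-relative advantage as used in GRPO-style training: for G responses
# to the same question, A_i = (r_i - mean(r)) / std(r). Rewards here would
# be the sum of accuracy, format, and consistency terms tracked in Fig. 5.
def grpo_advantages(group_rewards: np.ndarray, eps: float = 1e-6) -> np.ndarray:
    mean, std = group_rewards.mean(), group_rewards.std()
    return (group_rewards - mean) / (std + eps)

rewards = np.array([1.0, 0.0, 1.0, 0.0])  # e.g. accuracy reward over G=4 samples
print(grpo_advantages(rewards))           # positive for correct, negative otherwise
```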
For instance, mathematical problems often require multi-step calculations, where increased reasoning length tends to correlate positively with reasoning ability. In contrast, for multimodal reasoning tasks like embodied spatial" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, + 84, + 294, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 84, + 294, + 140 + ], + "spans": [ + { + "bbox": [ + 50, + 84, + 294, + 140 + ], + "type": "text", + "content": "reasoning, the LM model training process converges toward an optimal range of text output distributions. Concise reasoning patterns may facilitate embodied spatial reasoning. This highlights the versatility of the RL-based post-training method, demonstrating its ability to benefit a wide range of reasoning tasks." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 51, + 148, + 285, + 161 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 148, + 285, + 161 + ], + "spans": [ + { + "bbox": [ + 51, + 148, + 285, + 161 + ], + "type": "text", + "content": "5.2 Why Not Directly Perform RL on VLLMs?" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 163, + 295, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 163, + 295, + 262 + ], + "spans": [ + { + "bbox": [ + 50, + 163, + 295, + 262 + ], + "type": "text", + "content": "We previously attempted direct RL training on the Qwen-VL-3B-Instruct model. As shown in Figure 5e, under similar training parameters and time, the performance of the VLM was notably inferior to that of the LM. Upon convergence, the VLM achieved an accuracy of " + }, + { + "bbox": [ + 50, + 163, + 295, + 262 + ], + "type": "inline_equation", + "content": "43.8\\%" + }, + { + "bbox": [ + 50, + 163, + 295, + 262 + ], + "type": "text", + "content": " on the test set, significantly lower than the LM. The limited perceptual capability of the VLM restricts its potential for reasoning improvements. Therefore, under resource-constrained conditions, collaborative inference integrating models of different scales presents a promising solution." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 51, + 272, + 294, + 284 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 272, + 294, + 284 + ], + "spans": [ + { + "bbox": [ + 51, + 272, + 294, + 284 + ], + "type": "text", + "content": "5.3 Is Accuracy+Format Rewards All You Need?" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 287, + 295, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 287, + 295, + 462 + ], + "spans": [ + { + "bbox": [ + 50, + 287, + 295, + 462 + ], + "type": "text", + "content": "According to Deepseek-R1-Zero, it appears that accuracy and format rewards are enough to guide the model toward correct reasoning. However, during training in our problem, we observed instances of reward hacking, where the model optimizes the answer but the reasoning process leading to that answer is inconsistent with the answer itself. We aim to ensure alignment between the model's reasoning process and its answer, both to enhance generalization and improve the interpretability of the reasoning process. As shown in Figure 5f, we employ GPT-4o to evaluate the proportion of logically consistent outputs on the test set before and after incorporating a logical consistency reward. 
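One way to picture the composite reward this section motivates: accuracy and format terms plus a consistency bonus that pays out only when a judge agrees the reasoning supports the chosen option. A sketch under stated assumptions; the weights and the correctness gating are illustrative choices, and the judge verdict stands in for the GPT-4o check rather than reproducing it:

```python
# Composite scalar reward for one completion; all weights are assumptions.
def total_reward(is_correct: bool, has_format: bool, is_consistent: bool,
                 w_acc: float = 1.0, w_fmt: float = 0.5, w_con: float = 0.5) -> float:
    r = w_acc * is_correct + w_fmt * has_format
    # Gating the consistency bonus on correctness (an assumed design) means
    # a fluent-but-wrong rationale earns nothing extra, which is the
    # "right answer, wrong reasoning" hacking the paper describes.
    if is_correct and is_consistent:
        r += w_con
    return r

print(total_reward(True, True, False))  # 1.5 -- correct but inconsistent reasoning
print(total_reward(True, True, True))   # 2.0 -- correct and consistent
```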
This proportion increased from " + }, + { + "bbox": [ + 50, + 287, + 295, + 462 + ], + "type": "inline_equation", + "content": "46.01\\%" + }, + { + "bbox": [ + 50, + 287, + 295, + 462 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 50, + 287, + 295, + 462 + ], + "type": "inline_equation", + "content": "99.43\\%" + }, + { + "bbox": [ + 50, + 287, + 295, + 462 + ], + "type": "text", + "content": " after the reward was added, demonstrating the value of this approach in addressing embodied spatial multiple-choice reasoning tasks. Moreover, this reward mechanism could potentially be extended to other reasoning tasks prone to answer accuracy hacking during training." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 472, + 226, + 483 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 472, + 226, + 483 + ], + "spans": [ + { + "bbox": [ + 51, + 472, + 226, + 483 + ], + "type": "text", + "content": "5.4 RL vs SFT when Generalizing to" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 76, + 484, + 294, + 496 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 484, + 294, + 496 + ], + "spans": [ + { + "bbox": [ + 76, + 484, + 294, + 496 + ], + "type": "text", + "content": "Out-of-Distribution (OOD) Embodied Tasks?" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 499, + 295, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 499, + 295, + 653 + ], + "spans": [ + { + "bbox": [ + 50, + 499, + 295, + 653 + ], + "type": "text", + "content": "For small-scale LMs, we aim to explore their generalization performance when trained with SFT instead of RL. To evaluate this, we introduced two OOD datasets: EgoSchema and the egocentric task in MVBench. As discussed in Section 4.1.1, these two OOD datasets differ significantly from the training set in both task content and scene characteristics. The accuracy results are shown in Figure 5g. RL-trained models demonstrate generalization ability across both datasets. On the EgoSchema dataset, the RL-trained language model under the Embodied-R framework even achieves performance comparable to the state-of-the-art multimodal reasoning model, Gemini-2.5-Pro. SFT-trained models showed improvement on EgoSchema but a decline on MVBench. This suggests that slow reasoning, as employed in RL models, could be a promising approach to improve the generalization capabilities even for small-scale models." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 662, + 127, + 673 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 662, + 127, + 673 + ], + "spans": [ + { + "bbox": [ + 51, + 662, + 127, + 673 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 677, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 677, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 677, + 295, + 710 + ], + "type": "text", + "content": "To address embodied spatial reasoning tasks, we propose a collaborative framework that leverages the perceptual capabilities of large-scale VLMs and the reasoning potential of compact LMs." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 84, + 559, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 84, + 559, + 194 + ], + "spans": [ + { + "bbox": [ + 313, + 84, + 559, + 194 + ], + "type": "text", + "content": "Through 90 hours of RL training on a 3B LM using 8 NVIDIA A800-SXM4-40GB GPUs, Embodied-R surpasses OpenAI-o1 by " + }, + { + "bbox": [ + 313, + 84, + 559, + 194 + ], + "type": "inline_equation", + "content": "13.9\\%" + }, + { + "bbox": [ + 313, + 84, + 559, + 194 + ], + "type": "text", + "content": " and Gemini-2.5-Pro by " + }, + { + "bbox": [ + 313, + 84, + 559, + 194 + ], + "type": "inline_equation", + "content": "10.3\\%" + }, + { + "bbox": [ + 313, + 84, + 559, + 194 + ], + "type": "text", + "content": " on the test set. Other key findings include: (1) RL training leads to output length convergence, aligning with the requirements of the task; (2) the reasoning upper bound of same-scale VLMs trained with RL is significantly lower than that of Embodied-R, due to inherent limitations in perception; (3) the proposed logical consistency reward enhances reasoning quality; and (4) models trained via RL exhibit stronger generalization on out-of-distribution datasets compared to those trained with SFT." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 315, + 209, + 372, + 219 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 209, + 372, + 219 + ], + "spans": [ + { + "bbox": [ + 315, + 209, + 372, + 219 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 317, + 221, + 559, + 708 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 319, + 221, + 559, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 221, + 559, + 255 + ], + "spans": [ + { + "bbox": [ + 319, + 221, + 559, + 255 + ], + "type": "text", + "content": "[1] Marah Abdin, Jyoti Aneja, Hany Awadalla, Ahmed Awadallah, Ammar Ahmad Awan, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Jianmin Bao, Harkirat Behl, et al. 2024. Phi-3 technical report: A highly capable language model locally on your phone. arXiv preprint arXiv:2404.14219 (2024)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 320, + 255, + 559, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 255, + 559, + 285 + ], + "spans": [ + { + "bbox": [ + 320, + 255, + 559, + 285 + ], + "type": "text", + "content": "[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. 2023. Gpt-4 technical report. arXiv preprint arXiv:2303.08774 (2023)." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 320, + 286, + 559, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 286, + 559, + 318 + ], + "spans": [ + { + "bbox": [ + 320, + 286, + 559, + 318 + ], + "type": "text", + "content": "[3] Michael Ahn, Debidatta Dwibedi, Chelsea Finn, Montse Gonzalez Arenas, Keerthana Gopalakrishnan, Karol Hausman, Brian Ichter, Alex Irpan, Nikhil Joshi, Ryan Julian, et al. 2024. Autort: Embodied foundation models for large scale orchestration of robotic agents. arXiv preprint arXiv:2401.12963 (2024)." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 320, + 318, + 559, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 318, + 559, + 350 + ], + "spans": [ + { + "bbox": [ + 320, + 318, + 559, + 350 + ], + "type": "text", + "content": "[4] Cameron A Aubin, Benjamin Gorissen, Edoardo Milana, Philip R Buskohl, Nathan Lazarus, Geoffrey A Slipher, Christoph Keplinger, Josh Bongard, Fumiya Iida, Jennifer A Lewis, et al. 2022. Towards enduring autonomous robots via embodied energy. Nature 602, 7897 (2022), 393-402." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 320, + 350, + 559, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 350, + 559, + 373 + ], + "spans": [ + { + "bbox": [ + 320, + 350, + 559, + 373 + ], + "type": "text", + "content": "[5] Daichi Azuma, Taiki Miyanishi, Shuhei Kurita, and Motoaki Kawanabe. 2022. Scanqa: 3d question answering for spatial scene understanding. In proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 19129-19139." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 320, + 374, + 559, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 374, + 559, + 397 + ], + "spans": [ + { + "bbox": [ + 320, + 374, + 559, + 397 + ], + "type": "text", + "content": "[6] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. 2025. Qwen2.5-vl technical report. arXiv preprint arXiv:2502.13923 (2025)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 320, + 398, + 559, + 429 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 398, + 559, + 429 + ], + "spans": [ + { + "bbox": [ + 320, + 398, + 559, + 429 + ], + "type": "text", + "content": "[7] Keshigeyan Chandrasegaran, Agrim Gupta, Lea M Hadzic, Taran Kota, Jimming He, Cristóbal Eyzaguirre, Zane Durante, Manling Li, Jiajun Wu, and Fei-Fei Li. 2024. Hourvideo: 1-hour video-language understanding. Advances in Neural Information Processing Systems 37 (2024), 53168-53197." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 320, + 430, + 559, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 430, + 559, + 460 + ], + "spans": [ + { + "bbox": [ + 320, + 430, + 559, + 460 + ], + "type": "text", + "content": "[8] Bolei Chen, Jiaxu Kang, Ping Zhong, Yixiong Liang, Yu Sheng, and Jianxin Wang. 2024. Embodied Contrastive Learning with Geometric Consistency and Behavioral Awareness for Object Navigation. In Proceedings of the 32nd ACM International Conference on Multimedia, 4776-4785." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 320, + 461, + 559, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 461, + 559, + 493 + ], + "spans": [ + { + "bbox": [ + 320, + 461, + 559, + 493 + ], + "type": "text", + "content": "[9] Boyuan Chen, Zhuo Xu, Sean Kirmani, Brian Ichter, Dorsa Sadigh, Leonidas Guibas, and Fei Xia. 2024. Spatialvlm: Endowing vision-language models with spatial reasoning capabilities. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 14455-14465." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 494, + 559, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 494, + 559, + 517 + ], + "spans": [ + { + "bbox": [ + 317, + 494, + 559, + 517 + ], + "type": "text", + "content": "[10] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. 
2025. R1-V: Reinforcing Super Generalization Ability in Vision-Language Models with Less Than $3. https://github.com/Deep-Agent/R1-V. Accessed: 2025-02-02." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 517, + 559, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 517, + 559, + 555 + ], + "spans": [ + { + "bbox": [ + 317, + 517, + 559, + 555 + ], + "type": "text", + "content": "[11] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. 2024. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 24185-24198." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 317, + 557, + 559, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 557, + 559, + 588 + ], + "spans": [ + { + "bbox": [ + 317, + 557, + 559, + 588 + ], + "type": "text", + "content": "[12] Sijie Cheng, Kechen Fang, Yangyang Yu, Sicheng Zhou, Bohao Li, Ye Tian, Tingguang Li, Lei Han, and Yang Liu. 2024. Videgothink: Assessing egocentric video understanding capabilities for embodied ai. arXiv preprint arXiv:2410.11623 (2024)." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 317, + 589, + 559, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 589, + 559, + 620 + ], + "spans": [ + { + "bbox": [ + 317, + 589, + 559, + 620 + ], + "type": "text", + "content": "[13] Stephanie Clarke and Judit Miklossy. 1990. Occipital cortex in man: Organization of callosal connections, related myelo- and cytoarchitecture, and putative boundaries of functional visual areas. Journal of Comparative Neurology 298, 2 (1990), 188-214." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 621, + 559, + 636 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 621, + 559, + 636 + ], + "spans": [ + { + "bbox": [ + 317, + 621, + 559, + 636 + ], + "type": "text", + "content": "[14] Maël Donoso, Anne GE Collins, and Etienne Koechlin. 2014. Foundations of human reasoning in the prefrontal cortex. Science 344, 6191 (2014), 1481-1486." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 637, + 559, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 637, + 559, + 661 + ], + "spans": [ + { + "bbox": [ + 317, + 637, + 559, + 661 + ], + "type": "text", + "content": "[15] Danny Driess, Fei Xia, Mehdi SM Sajjadi, Corey Lynch, Aakanksha Chowdhery, Ayzaan Wahid, Jonathan Tompson, Quan Vuong, Tianhe Yu, Wenlong Huang, et al. 2023. Palm-e: An embodied multimodal language model. (2023)." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 317, + 661, + 559, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 661, + 559, + 685 + ], + "spans": [ + { + "bbox": [ + 317, + 661, + 559, + 685 + ], + "type": "text", + "content": "[16] Hao Fei, Shengqiong Wu, Wei Ji, Hanwang Zhang, Meishan Zhang, Mong-Li Lee, and Wynne Hsu. 2024. Video-of-thought: Step-by-step video reasoning from perception to cognition. arXiv preprint arXiv:2501.03230 (2024)." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 685, + 559, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 685, + 559, + 708 + ], + "spans": [ + { + "bbox": [ + 317, + 685, + 559, + 708 + ], + "type": "text", + "content": "[17] Nanyi Fei, Zhiwu Lu, Yizhao Gao, Guoxing Yang, Yuqi Huo, Jingyuan Wen, Haoyu Lu, Ruihua Song, Xin Gao, Tao Xiang, et al. 2022. Towards artificial general intelligence via a multimodal foundation model. Nature Communications" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 53, + 87, + 294, + 708 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 69, + 87, + 119, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 87, + 119, + 95 + ], + "spans": [ + { + "bbox": [ + 69, + 87, + 119, + 95 + ], + "type": "text", + "content": "13, 1 (2022), 3094." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 53, + 95, + 294, + 118 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 95, + 294, + 118 + ], + "spans": [ + { + "bbox": [ + 53, + 95, + 294, + 118 + ], + "type": "text", + "content": "[18] Leonardo Fogassi, Pier Francesco Ferrari, Benno Gesierich, Stefano Rozzi, Fabian Chersi, and Giacomo Rizzolatti. 2005. Parietal lobe: from action organization to intention understanding. Science 308, 5722 (2005), 662-667." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 53, + 118, + 294, + 135 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 118, + 294, + 135 + ], + "spans": [ + { + "bbox": [ + 53, + 118, + 294, + 135 + ], + "type": "text", + "content": "[19] Lucia Foglia and Robert A Wilson. 2013. Embodied cognition. Wiley Interdisciplinary Reviews: Cognitive Science 4, 3 (2013), 319-325." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 135, + 294, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 135, + 294, + 167 + ], + "spans": [ + { + "bbox": [ + 53, + 135, + 294, + 167 + ], + "type": "text", + "content": "[20] Chen Gao, Baining Zhao, Weichen Zhang, Jinzhu Mao, Jun Zhang, Zhiheng Zheng, Fanhang Man, Jianjie Fang, Zile Zhou, Jinqiang Cui, et al. 2024. EmbodiedCity: A Benchmark Platform for Embodied Agent in Real-world City Environment. arXiv preprint arXiv:2410.09604 (2024)." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 53, + 167, + 294, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 167, + 294, + 182 + ], + "spans": [ + { + "bbox": [ + 53, + 167, + 294, + 182 + ], + "type": "text", + "content": "[21] Google. 2024. Gemini API. https://ai.google.dev/gemini-api. Accessed: 2025-04-12." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 53, + 182, + 294, + 221 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 182, + 294, + 221 + ], + "spans": [ + { + "bbox": [ + 53, + 182, + 294, + 221 + ], + "type": "text", + "content": "[22] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, et al. 2022. Ego4d: Around the world in 3,000 hours of egocentric video. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 18995-19012." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 53, + 222, + 294, + 247 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 222, + 294, + 247 + ], + "spans": [ + { + "bbox": [ + 53, + 222, + 294, + 247 + ], + "type": "text", + "content": "[23] Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. 2025. rStar-Math: Small LLMs Can Master Math Reasoning with Self-Evolved Deep Thinking. arXiv preprint arXiv:2501.04519 (2025)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 53, + 247, + 294, + 278 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 247, + 294, + 278 + ], + "spans": [ + { + "bbox": [ + 53, + 247, + 294, + 278 + ], + "type": "text", + "content": "[24] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948 (2025)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 53, + 278, + 294, + 302 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 278, + 294, + 302 + ], + "spans": [ + { + "bbox": [ + 53, + 278, + 294, + 302 + ], + "type": "text", + "content": "[25] Shima Imani, Liang Du, and Harsh Shrivastava. 2023. Mathprompter: Mathematical reasoning using large language models. arXiv preprint arXiv:2303.05398 (2023)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 53, + 302, + 294, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 302, + 294, + 318 + ], + "spans": [ + { + "bbox": [ + 53, + 302, + 294, + 318 + ], + "type": "text", + "content": "[26] James Intriligator and Patrick Cavanagh. 2001. The spatial resolution of visual attention. Cognitive psychology 43, 3 (2001), 171-216." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 53, + 318, + 294, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 318, + 294, + 350 + ], + "spans": [ + { + "bbox": [ + 53, + 318, + 294, + 350 + ], + "type": "text", + "content": "[27] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. 2024. Mvbench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 22195-22206." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 53, + 350, + 294, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 350, + 294, + 373 + ], + "spans": [ + { + "bbox": [ + 53, + 350, + 294, + 373 + ], + "type": "text", + "content": "[28] Tianlin Li, Qian Liu, Tianyu Pang, Chao Du, Qing Guo, Yang Liu, and Min Lin. 2024. Purifying large language models by assembling a small language model. arXiv preprint arXiv:2402.14845 (2024)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 373, + 294, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 373, + 294, + 397 + ], + "spans": [ + { + "bbox": [ + 53, + 373, + 294, + 397 + ], + "type": "text", + "content": "[29] Bin Lin, Yang Ye, Bin Zhu, Jiaxi Cui, Munan Ning, Peng Jin, and Li Yuan. 2023. Video-llava: Learning united visual representation by alignment before projection. arXiv preprint arXiv:2311.10122 (2023)." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 398, + 294, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 398, + 294, + 414 + ], + "spans": [ + { + "bbox": [ + 53, + 398, + 294, + 414 + ], + "type": "text", + "content": "[30] Fangyu Liu, Guy Emerson, and Nigel Collier. 2023. Visual spatial reasoning. Transactions of the Association for Computational Linguistics 11 (2023), 635-651." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 53, + 414, + 294, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 414, + 294, + 437 + ], + "spans": [ + { + "bbox": [ + 53, + 414, + 294, + 437 + ], + "type": "text", + "content": "[31] Hongbin Liu, Yongze Zhao, Peng Dong, Xiuyi Guo, and Yilin Wang. 2024. IOFTracker: A Two-Stage Multiple Targets Tracking Method Using Spatial-Temporal Fusion Algorithm. Applied Sciences 15, 1 (2024), 107." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 53, + 437, + 294, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 437, + 294, + 461 + ], + "spans": [ + { + "bbox": [ + 53, + 437, + 294, + 461 + ], + "type": "text", + "content": "[32] Yang Liu, Weixing Chen, Yongjie Bai, Xiaodan Liang, Guanbin Li, Wen Gao, and Liang Lin. 2024. Aligning cyber space with physical world: A comprehensive survey on embodied ai. arXiv preprint arXiv:2407.06886 (2024)." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 53, + 461, + 294, + 485 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 461, + 294, + 485 + ], + "spans": [ + { + "bbox": [ + 53, + 461, + 294, + 485 + ], + "type": "text", + "content": "[33] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. 2025. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785 (2025)." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 53, + 485, + 294, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 485, + 294, + 517 + ], + "spans": [ + { + "bbox": [ + 53, + 485, + 294, + 517 + ], + "type": "text", + "content": "[34] Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. 2023. Egoschema: A diagnostic benchmark for very long-form video language understanding. Advances in Neural Information Processing Systems 36 (2023), 46212-46244." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 53, + 517, + 294, + 548 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 517, + 294, + 548 + ], + "spans": [ + { + "bbox": [ + 53, + 517, + 294, + 548 + ], + "type": "text", + "content": "[35] Yingqian Min, Zhipeng Chen, Jinhao Jiang, Jie Chen, Jia Deng, Yiwen Hu, Yiru Tang, Jiapeng Wang, Xiaoxue Cheng, Huatong Song, et al. 2024. Imitate, explore, and self-improve: A reproduction report on slow-thinking reasoning systems. arXiv preprint arXiv:2412.09413 (2024)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 53, + 548, + 294, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 548, + 294, + 582 + ], + "spans": [ + { + "bbox": [ + 53, + 548, + 294, + 582 + ], + "type": "text", + "content": "[36] Yao Mu, Qinglong Zhang, Mengkang Hu, Wenhai Wang, Mingyu Ding, Jun Jin, Bin Wang, Jifeng Dai, Yu Qiao, and Ping Luo. 2023. Embodiedgpt: Vision-language pre-training via embodied chain of thought. Advances in Neural Information Processing Systems 36 (2023), 25081-25094." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 53, + 582, + 283, + 590 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 582, + 283, + 590 + ], + "spans": [ + { + "bbox": [ + 53, + 582, + 283, + 590 + ], + "type": "text", + "content": "[37] OpenAI. 2024. GPT-4o API. https://openai.com/api/. Accessed: 2025-04-12." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 53, + 590, + 294, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 590, + 294, + 605 + ], + "spans": [ + { + "bbox": [ + 53, + 590, + 294, + 605 + ], + "type": "text", + "content": "[38] OpenAI. 2024. Learning to Reason with LLMs. https://openai.com/index/learning-to-reason-with-llms/ Accessed: 2025-03-04." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 53, + 605, + 294, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 605, + 294, + 620 + ], + "spans": [ + { + "bbox": [ + 53, + 605, + 294, + 620 + ], + "type": "text", + "content": "[39] OpenAI. 2025. OpenAI o3-mini. https://openai.com/index/openai-o3-mini/ Accessed: 2025-04-15." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 53, + 620, + 294, + 652 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 620, + 294, + 652 + ], + "spans": [ + { + "bbox": [ + 53, + 620, + 294, + 652 + ], + "type": "text", + "content": "[40] Amrith Setlur, Saurabh Garg, Xinyang Geng, Naman Garg, Virginia Smith, and Aviral Kumar. 2025. Rl on incorrect synthetic data scales the efficiency of llm math reasoning by eight-fold. Advances in Neural Information Processing Systems 37 (2025), 43000-43031." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 53, + 653, + 294, + 676 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 653, + 294, + 676 + ], + "spans": [ + { + "bbox": [ + 53, + 653, + 294, + 676 + ], + "type": "text", + "content": "[41] Dhruv Shah, Blazej Osinski, Sergey Levine, et al. 2023. Lm-nav: Robotic navigation with large pre-trained models of language, vision, and action. In Conference on robot learning. PMLR, 492–504." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 53, + 676, + 294, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 676, + 294, + 708 + ], + "spans": [ + { + "bbox": [ + 53, + 676, + 294, + 708 + ], + "type": "text", + "content": "[42] Alessandro Suglia, Claudio Greco, Katie Baker, Jose L Part, Ioannis Papaioannou, Arash Eshghi, Ioannis Konstas, and Oliver Lemon. 2024. Alanavlm: A multimodal embodied ai foundation model for egocentric video understanding. arXiv preprint arXiv:2406.13807 (2024)." + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 317, + 87, + 558, + 708 + ], + "type": "list", + "angle": 0, + "index": 49, + "blocks": [ + { + "bbox": [ + 317, + 87, + 558, + 110 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 87, + 558, + 110 + ], + "spans": [ + { + "bbox": [ + 317, + 87, + 558, + 110 + ], + "type": "text", + "content": "[43] Guangzhi Sun, Yudong Yang, Jimin Zhuang, Changli Tang, Yixuan Li, Wei Li, Zejun MA, and Chao Zhang. 2025. video-SALMONN-o1: Reasoning-enhanced Audio-visual Large Language Model. arXiv preprint arXiv:2502.11775 (2025)." 
+ } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 317, + 111, + 558, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 111, + 558, + 142 + ], + "spans": [ + { + "bbox": [ + 317, + 111, + 558, + 142 + ], + "type": "text", + "content": "[44] Gemini Team, Rohan Anil, Sebastian Borgeaud, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, Katie Millican, et al. 2023. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805 (2023)." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 317, + 142, + 558, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 142, + 558, + 167 + ], + "spans": [ + { + "bbox": [ + 317, + 142, + 558, + 167 + ], + "type": "text", + "content": "[45] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. 2025. Kimi k1.5: Scaling reinforcement learning with lms. arXiv preprint arXiv:2501.12599 (2025)." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 317, + 167, + 558, + 182 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 167, + 558, + 182 + ], + "spans": [ + { + "bbox": [ + 317, + 167, + 558, + 182 + ], + "type": "text", + "content": "[46] Qwen Team. 2024. Qwen-VL-Max. https://qwenlm.github.io/blog/qwen-vl-max/. Accessed: 2025-04-12." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 317, + 182, + 558, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 182, + 558, + 198 + ], + "spans": [ + { + "bbox": [ + 317, + 182, + 558, + 198 + ], + "type": "text", + "content": "[47] Qwen Team. 2024. QwQ: Reflect Deeply on the Boundaries of the Unknown. https://qwenlm.github.io/blog/qwq-32b-preview/" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 317, + 198, + 558, + 231 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 198, + 558, + 231 + ], + "spans": [ + { + "bbox": [ + 317, + 198, + 558, + 231 + ], + "type": "text", + "content": "[48] Omkar Thawakar, Dinura Dissanayake, Ketan More, Ritesh Thawkar, Ahmed Heakl, Noor Ahsan, Yuhao Li, Mohammed Zumri, Jean Lahoud, Rao Muhammad Anwer, et al. 2025. Llamav-o1: Rethinking step-by-step visual reasoning in llms. arXiv preprint arXiv:2501.06186 (2025)." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 317, + 231, + 558, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 231, + 558, + 255 + ], + "spans": [ + { + "bbox": [ + 317, + 231, + 558, + 255 + ], + "type": "text", + "content": "[49] Dennis Ulmer, Martin Gubri, Hwaran Lee, Sangdoo Yun, and Seong Joon Oh. 2024. Calibrating large language models using their generations only. arXiv preprint arXiv:2403.05973 (2024)." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 317, + 255, + 558, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 255, + 558, + 294 + ], + "spans": [ + { + "bbox": [ + 317, + 255, + 558, + 294 + ], + "type": "text", + "content": "[50] Fali Wang, Zhiwei Zhang, Xianren Zhang, Zongyu Wu, Tzuhao Mo, Qiuhao Lu, Wanjing Wang, Rui Li, Junjie Xu, Xianfeng Tang, et al. 2024. A comprehensive survey of small language models in the era of large language models: Techniques, enhancements, applications, collaboration with llms, and trustworthiness. arXiv preprint arXiv:2411.03350 (2024)." 
+ } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 317, + 294, + 558, + 326 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 294, + 558, + 326 + ], + "spans": [ + { + "bbox": [ + 317, + 294, + 558, + 326 + ], + "type": "text", + "content": "[51] Jiayu Wang, Yifei Ming, Zhenmei Shi, Vibhav Vineet, Xin Wang, Sharon Li, and Neel Joshi. 2024. Is a picture worth a thousand words? delving into spatial reasoning for vision language models. Advances in Neural Information Processing Systems 37 (2024), 75392-75421." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 317, + 326, + 558, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 326, + 558, + 365 + ], + "spans": [ + { + "bbox": [ + 317, + 326, + 558, + 365 + ], + "type": "text", + "content": "[52] Tai Wang, Xiaohan Mao, Chenming Zhu, Runsen Xu, Ruiyuan Lyu, Peisen Li, Xiao Chen, Wenwei Zhang, Kai Chen, Tianfan Xue, et al. 2024. Embodiedscan: A holistic multi-modal 3d perception suite towards embodied ai. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 1975-1976." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 317, + 365, + 558, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 365, + 558, + 397 + ], + "spans": [ + { + "bbox": [ + 317, + 365, + 558, + 397 + ], + "type": "text", + "content": "[53] Zhecan Wang, Garrett Bingham, Adams Wei Yu, Quoc V Le, Thang Luong, and Golnaz Ghiasi. 2024. Haloquest: A visual hallucination dataset for advancing multimodal reasoning. In European Conference on Computer Vision. Springer, 288-304." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 317, + 398, + 558, + 429 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 398, + 558, + 429 + ], + "spans": [ + { + "bbox": [ + 317, + 398, + 558, + 429 + ], + "type": "text", + "content": "[54] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems 35 (2022), 24824-24837." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 317, + 430, + 558, + 461 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 430, + 558, + 461 + ], + "spans": [ + { + "bbox": [ + 317, + 430, + 558, + 461 + ], + "type": "text", + "content": "[55] Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. 2025. Logic-RL: Unleashing LLM Reasoning with Rule-Based Reinforcement Learning. arXiv preprint arXiv:2502.14768 (2025)." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 317, + 461, + 558, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 461, + 558, + 501 + ], + "spans": [ + { + "bbox": [ + 317, + 461, + 558, + 501 + ], + "type": "text", + "content": "[56] Cheng Xu, Xiaofeng Hou, Jiacheng Liu, Chao Li, Tianhao Huang, Xiaozhi Zhu, Mo Niu, Lingyu Sun, Peng Tang, Tongqiao Xu, et al. 2023. Mmbench: Benchmarking end-to-end multi-modal dnns and understanding their hardware-software implications. In 2023 IEEE International Symposium on Workload Characterization (IISWC). IEEE, 154-166." 
+ } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 317, + 501, + 558, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 501, + 558, + 533 + ], + "spans": [ + { + "bbox": [ + 317, + 501, + 558, + 533 + ], + "type": "text", + "content": "[57] An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, et al. 2024. Qwen2.5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122 (2024)." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 317, + 533, + 558, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 533, + 558, + 557 + ], + "spans": [ + { + "bbox": [ + 317, + 533, + 558, + 557 + ], + "type": "text", + "content": "[58] Jihan Yang, Shusheng Yang, Anjali W Gupta, Rilyn Han, Li Fei-Fei, and Saining Xie. 2024. Thinking in space: How multimodal large language models see, remember, and recall spaces. arXiv preprint arXiv:2412.14171 (2024)." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 317, + 557, + 558, + 589 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 557, + 558, + 589 + ], + "spans": [ + { + "bbox": [ + 317, + 557, + 558, + 589 + ], + "type": "text", + "content": "[59] Weihao Zeng, Yuzhen Huang, Wei Liu, Keqing He, Qian Liu, Zejun Ma, and Junxian He. 2025. 7B Model and 8K Examples: Emerging Reasoning with Reinforcement Learning is Both Effective and Efficient. https://hkust-nlp.notion.site/simplerl-reason. Notion Blog." + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 317, + 589, + 558, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 589, + 558, + 613 + ], + "spans": [ + { + "bbox": [ + 317, + 589, + 558, + 613 + ], + "type": "text", + "content": "[60] Dan Zhang, Sining Zhoubian, Ziniu Hu, Yisong Yue, Yuxiao Dong, and Jie Tang. 2025. Rest-mcts*: Llm self-training via process reward guided tree search. Advances in Neural Information Processing Systems 37 (2025), 64735-64772." + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 317, + 613, + 558, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 613, + 558, + 628 + ], + "spans": [ + { + "bbox": [ + 317, + 613, + 558, + 628 + ], + "type": "text", + "content": "[61] Yiming Zhang, Nicholas Carlini, and Daphne Ippolito. 2023. Effective prompt extraction from language models. arXiv preprint arXiv:2307.06865 (2023)." + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 317, + 628, + 558, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 628, + 558, + 668 + ], + "spans": [ + { + "bbox": [ + 317, + 628, + 558, + 668 + ], + "type": "text", + "content": "[62] Baining Zhao, Jianjie Fang, Zichao Dai, Ziyou Wang, Jirong Zha, Weichen Zhang, Chen Gao, Yue Wang, Jinqiang Cui, Xinlei Chen, and Yong Li. 2025. UrbanVideo-Bench: Benchmarking Vision-Language Models on Embodied Intelligence with Video Data in Urban Spaces. arXiv:2503.06157 [cs.CV] https://arxiv.org/abs/2503.06157" + } + ] + } + ], + "index": 46 + }, + { + "bbox": [ + 317, + 668, + 558, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 668, + 558, + 693 + ], + "spans": [ + { + "bbox": [ + 317, + 668, + 558, + 693 + ], + "type": "text", + "content": "[63] Theodore Zhao, Mu Wei, J Samuel Preston, and Hoifung Poon. 2023. 
Automatic Calibration and Error Correction for Generative Large Language Models via Pareto Optimal Self-Supervision. (2023)." + } + ] + } + ], + "index": 47 + }, + { + "bbox": [ + 317, + 693, + 558, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 693, + 558, + 708 + ], + "spans": [ + { + "bbox": [ + 317, + 693, + 558, + 708 + ], + "type": "text", + "content": "[64] Karl Zilles and Katrin Amunts. 2010. Centenary of Brodmann's map—conception and fate. Nature Reviews Neuroscience 11, 2 (2010), 139-145." + } + ] + } + ], + "index": 48 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 51, + 83, + 123, + 97 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 83, + 123, + 97 + ], + "spans": [ + { + "bbox": [ + 51, + 83, + 123, + 97 + ], + "type": "text", + "content": "A Appendix" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 51, + 99, + 185, + 111 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 99, + 185, + 111 + ], + "spans": [ + { + "bbox": [ + 51, + 99, + 185, + 111 + ], + "type": "text", + "content": "A.1 Dataset Introduction" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 114, + 295, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 114, + 295, + 245 + ], + "spans": [ + { + "bbox": [ + 50, + 114, + 295, + 245 + ], + "type": "text", + "content": "UrbanVideo-Bench: UrbanVideo-Bench is one of the training and testing datasets designed for embodied reasoning (embodied-r). This benchmark was proposed by Tsinghua University in February 2025. It captures two embodied characteristics of urban environments: complex urban scenes featuring dynamic and static elements, and unique aerial navigation scenarios. The dataset consists of 4 categories and 16 tasks, aimed at evaluating Video-LLMs in terms of recall, perception, reasoning, and navigation capabilities. In our paper, we focus on 4 of these complex tasks for reinforcement learning in video-based learning: Landmark Position, Counterfactual Reasoning, Progress Evaluation, and Action Generation, which represent challenging embodied outdoor tasks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 246, + 295, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 246, + 295, + 355 + ], + "spans": [ + { + "bbox": [ + 50, + 246, + 295, + 355 + ], + "type": "text", + "content": "VSI-Bench: VSI-Bench is another training and testing dataset for embodied reasoning (embodied-r). Proposed by Fei-Fei Li's team at Stanford in December 2024, this benchmark provides high-quality evaluation metrics for assessing the 3D, video-based, visual-spatial intelligence of multimodal large language models (MLLMs). The dataset comprises 2 categories and 8 tasks designed to evaluate key aspects of spatial reasoning. In our paper, we focus on 4 tasks for reinforcement learning in video-based learning: Relative Distance, Relative Direction, Route Planning, and Appearance Order, all of which are categorized as challenging embodied outdoor tasks." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 356, + 295, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 356, + 295, + 432 + ], + "spans": [ + { + "bbox": [ + 50, + 356, + 295, + 432 + ], + "type": "text", + "content": "EgoSchema: EgoSchema is one of the Out-of-Distribution (OOD) datasets utilized to evaluate the generalization capability of our model. This dataset is specifically designed as a long-form video question-answering benchmark, aimed at assessing modern vision and language systems' ability to understand and reason over extended video content. It provides a rigorous evaluation framework for long video understanding tasks." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 432, + 295, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 432, + 295, + 498 + ], + "spans": [ + { + "bbox": [ + 50, + 432, + 295, + 498 + ], + "type": "text", + "content": "MVBench: MVBench is another Out-of-Distribution (OOD) dataset employed to test the generalization capability of our model. MVBench consists of 20 complex video tasks, offering a comprehensive benchmark for evaluating the video understanding capabilities of existing multimodal models. This dataset is designed to address diverse and challenging scenarios in video-based reasoning." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 51, + 507, + 235, + 519 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 507, + 235, + 519 + ], + "spans": [ + { + "bbox": [ + 51, + 507, + 235, + 519 + ], + "type": "text", + "content": "A.2 Details of Key-Frame Extractor" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 521, + 295, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 521, + 295, + 555 + ], + "spans": [ + { + "bbox": [ + 50, + 521, + 295, + 555 + ], + "type": "text", + "content": "The goal of key-frame extraction is to ensure sufficient information gain between frames while maintaining a certain degree of overlap. The specific process is as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 555, + 295, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 555, + 295, + 609 + ], + "spans": [ + { + "bbox": [ + 50, + 555, + 295, + 609 + ], + "type": "text", + "content": "Step 1: a perspective transformation is used to model the geometric relationship between frames. 
Assuming " + }, + { + "bbox": [ + 50, + 555, + 295, + 609 + ], + "type": "inline_equation", + "content": "f_{t}" + }, + { + "bbox": [ + 50, + 555, + 295, + 609 + ], + "type": "text", + "content": " is a key-frame, to determine whether " + }, + { + "bbox": [ + 50, + 555, + 295, + 609 + ], + "type": "inline_equation", + "content": "f_{t + 1}" + }, + { + "bbox": [ + 50, + 555, + 295, + 609 + ], + "type": "text", + "content": " should also be considered a keyframe, keypoints and descriptors are calculated from " + }, + { + "bbox": [ + 50, + 555, + 295, + 609 + ], + "type": "inline_equation", + "content": "f_{t}" + }, + { + "bbox": [ + 50, + 555, + 295, + 609 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 555, + 295, + 609 + ], + "type": "inline_equation", + "content": "f_{t + 1}" + }, + { + "bbox": [ + 50, + 555, + 295, + 609 + ], + "type": "text", + "content": " using the Oriented FAST and Rotated BRIEF (ORB) algorithm:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 613, + 295, + 625 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 613, + 295, + 625 + ], + "spans": [ + { + "bbox": [ + 106, + 613, + 295, + 625 + ], + "type": "interline_equation", + "content": "\\text{Keypoints}_{t}, \\text{Descriptors}_{t} = \\mathrm{ORB}(f_{t}), \\tag{9}", + "image_path": "c2910b624ce561498b3ac5148b8d19cdc7e11a77167631d96ba8bc9677d3060b.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 94, + 628, + 294, + 640 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 628, + 294, + 640 + ], + "spans": [ + { + "bbox": [ + 94, + 628, + 294, + 640 + ], + "type": "interline_equation", + "content": "\\text{Keypoints}_{t+1}, \\text{Descriptors}_{t+1} = \\mathrm{ORB}(f_{t+1}). \\tag{10}", + "image_path": "5d7121079f97ca1cb2686b2a716e2f0a0ab89e209b2921ed014ce3f689dc13b2.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 642, + 295, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 642, + 295, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 642, + 295, + 710 + ], + "type": "text", + "content": "Next, a feature matching algorithm, such as the Brute-Force Matcher, is applied to match the descriptors between the two frames, identifying corresponding keypoint pairs " + }, + { + "bbox": [ + 50, + 642, + 295, + 710 + ], + "type": "inline_equation", + "content": "\\mathbf{l}_t^{\\mathrm{key}}" + }, + { + "bbox": [ + 50, + 642, + 295, + 710 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 642, + 295, + 710 + ], + "type": "inline_equation", + "content": "\\mathbf{l}_{t + 1}^{\\mathrm{key}}" + }, + { + "bbox": [ + 50, + 642, + 295, + 710 + ], + "type": "text", + "content": ". 
Using the matched keypoint pairs, the Random Sample Consensus (RANSAC) algorithm is employed to estimate the homography matrix " + }, + { + "bbox": [ + 50, + 642, + 295, + 710 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 50, + 642, + 295, + 710 + ], + "type": "text", + "content": ", which maps the content of " + }, + { + "bbox": [ + 50, + 642, + 295, + 710 + ], + "type": "inline_equation", + "content": "f_{t + 1}" + }, + { + "bbox": [ + 50, + 642, + 295, + 710 + ], + "type": "text", + "content": " to the coordinate space of " + }, + { + "bbox": [ + 50, + 642, + 295, + 710 + ], + "type": "inline_equation", + "content": "f_t" + }, + { + "bbox": [ + 50, + 642, + 295, + 710 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "spans": [ + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "text", + "content": "Step 2: The overlap ratio between two frames is then computed. Assuming the size of each video frame is " + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "inline_equation", + "content": "w \\times h" + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "text", + "content": ", for frames " + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "inline_equation", + "content": "f_{t}" + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "inline_equation", + "content": "f_{t+1}" + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "inline_equation", + "content": "\\mathbf{l}_{t} = \\{ [0,0], [w,0], [w,h], [0,h] \\}" + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "text", + "content": " represents the four corner points of " + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "inline_equation", + "content": "f_{t}" + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "text", + "content": "; " + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "inline_equation", + "content": "\\mathbf{l}_{t+1} = \\{ [0,0], [w,0], [w,h], [0,h] \\}" + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "text", + "content": " represents the four corner points of " + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "inline_equation", + "content": "f_{t+1}" + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "text", + "content": ". 
Using the homography matrix " + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "text", + "content": ", the corner points " + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "inline_equation", + "content": "\\mathbf{l}_{t+1}" + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "text", + "content": " of " + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "inline_equation", + "content": "f_{t+1}" + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "text", + "content": " are transformed into the coordinate space of " + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "inline_equation", + "content": "f_{t}" + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "inline_equation", + "content": "\\mathbf{l}_{t+1,i}' = \\mathbf{M} \\cdot \\mathbf{l}_{t+1,i}" + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "inline_equation", + "content": "\\mathbf{l}_{t+1,i} = [x,y,1]^T" + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "text", + "content": " represents the corner points of " + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "inline_equation", + "content": "f_{t+1}" + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "text", + "content": " in homogeneous coordinates, and " + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "inline_equation", + "content": "\\mathbf{l}_{t+1,i}' = [x',y',w']^T" + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "text", + "content": " represents the transformed corner points. The transformed points are further normalized to recover 2D coordinates, resulting in a quadrilateral representing " + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "inline_equation", + "content": "f_{t+1}" + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "text", + "content": " in " + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "inline_equation", + "content": "f_{t}" + }, + { + "bbox": [ + 313, + 84, + 559, + 242 + ], + "type": "text", + "content": "'s space. 
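The corner projection just described can be sketched as follows; cv2.perspectiveTransform performs the homogeneous multiplication and the division by w' in one call, and the RANSAC reprojection threshold of 5.0 pixels is an assumed value not given in the paper.

```python
import cv2
import numpy as np

def project_corners(pts_t, pts_t1, w, h):
    """Sketch: estimate M (mapping f_{t+1} into f_t's space) and transform corners."""
    src = np.float32(pts_t1).reshape(-1, 1, 2)  # matched points in f_{t+1}
    dst = np.float32(pts_t).reshape(-1, 1, 2)   # matched points in f_t
    M, _inliers = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
    # l_{t+1}: the four corners [0,0], [w,0], [w,h], [0,h] of f_{t+1}.
    corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]]).reshape(-1, 1, 2)
    # Applies M in homogeneous coordinates and normalizes by w',
    # yielding the 2D quadrilateral l'_{t+1} in f_t's coordinate space.
    return cv2.perspectiveTransform(corners, M).reshape(-1, 2)
```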
In "
+ },
+ {
+ "bbox": [
+ 313,
+ 84,
+ 559,
+ 242
+ ],
+ "type": "inline_equation",
+ "content": "f_{t}"
+ },
+ {
+ "bbox": [
+ 313,
+ 84,
+ 559,
+ 242
+ ],
+ "type": "text",
+ "content": "'s coordinate space, there are two polygons: Polygon "
+ },
+ {
+ "bbox": [
+ 313,
+ 84,
+ 559,
+ 242
+ ],
+ "type": "inline_equation",
+ "content": "L_{t}"
+ },
+ {
+ "bbox": [
+ 313,
+ 84,
+ 559,
+ 242
+ ],
+ "type": "text",
+ "content": " is defined by the corner points "
+ },
+ {
+ "bbox": [
+ 313,
+ 84,
+ 559,
+ 242
+ ],
+ "type": "inline_equation",
+ "content": "\\mathbf{l}_{t}"
+ },
+ {
+ "bbox": [
+ 313,
+ 84,
+ 559,
+ 242
+ ],
+ "type": "text",
+ "content": " of "
+ },
+ {
+ "bbox": [
+ 313,
+ 84,
+ 559,
+ 242
+ ],
+ "type": "inline_equation",
+ "content": "f_{t}"
+ },
+ {
+ "bbox": [
+ 313,
+ 84,
+ 559,
+ 242
+ ],
+ "type": "text",
+ "content": "; Polygon "
+ },
+ {
+ "bbox": [
+ 313,
+ 84,
+ 559,
+ 242
+ ],
+ "type": "inline_equation",
+ "content": "L_{t+1}'"
+ },
+ {
+ "bbox": [
+ 313,
+ 84,
+ 559,
+ 242
+ ],
+ "type": "text",
+ "content": " is defined by the transformed corner points "
+ },
+ {
+ "bbox": [
+ 313,
+ 84,
+ 559,
+ 242
+ ],
+ "type": "inline_equation",
+ "content": "\\mathbf{l}_{t+1}'"
+ },
+ {
+ "bbox": [
+ 313,
+ 84,
+ 559,
+ 242
+ ],
+ "type": "text",
+ "content": ". Thus, the overlap ratio "
+ },
+ {
+ "bbox": [
+ 313,
+ 84,
+ 559,
+ 242
+ ],
+ "type": "inline_equation",
+ "content": "c"
+ },
+ {
+ "bbox": [
+ 313,
+ 84,
+ 559,
+ 242
+ ],
+ "type": "text",
+ "content": " is defined as:"
+ }
+ ]
+ }
+ ],
+ "index": 12
+ },
+ {
+ "bbox": [
+ 396,
+ 249,
+ 558,
+ 275
+ ],
+ "type": "interline_equation",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 396,
+ 249,
+ 558,
+ 275
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 396,
+ 249,
+ 558,
+ 275
+ ],
+ "type": "interline_equation",
+ "content": "c = \\frac{\\operatorname{Area}\\left(L_{t} \\cap L_{t+1}^{\\prime}\\right)}{\\operatorname{Area}_{\\text{total}}}. \\tag{11}",
+ "image_path": "56d31757223b443053a84dfd4c15a0d5d2b58369a64b1d2b25eb891bf669ce7f.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 13
+ },
+ {
+ "bbox": [
+ 313,
+ 281,
+ 559,
+ 402
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 313,
+ 281,
+ 559,
+ 402
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 313,
+ 281,
+ 559,
+ 402
+ ],
+ "type": "text",
+ "content": "If "
+ },
+ {
+ "bbox": [
+ 313,
+ 281,
+ 559,
+ 402
+ ],
+ "type": "inline_equation",
+ "content": "c"
+ },
+ {
+ "bbox": [
+ 313,
+ 281,
+ 559,
+ 402
+ ],
+ "type": "text",
+ "content": " is less than a predefined threshold "
+ },
+ {
+ "bbox": [
+ 313,
+ 281,
+ 559,
+ 402
+ ],
+ "type": "inline_equation",
+ "content": "\\varepsilon"
+ },
+ {
+ "bbox": [
+ 313,
+ 281,
+ 559,
+ 402
+ ],
+ "type": "text",
+ "content": ", it indicates significant visual changes between the frames, and "
+ },
+ {
+ "bbox": [
+ 313,
+ 281,
+ 559,
+ 402
+ ],
+ "type": "inline_equation",
+ "content": "f_{t+1}"
+ },
+ {
+ "bbox": [
+ 313,
+ 281,
+ 559,
+ 402
+ ],
+ "type": "text",
+ "content": " is marked as a key-frame. Otherwise, the algorithm proceeds to calculate the overlap ratio between "
+ },
+ {
+ "bbox": [
+ 313,
+ 281,
+ 559,
+ 402
+ ],
+ "type": "inline_equation",
+ "content": "f_t"
+ },
+ {
+ "bbox": [
+ 313,
+ 281,
+ 559,
+ 402
+ ],
+ "type": "text",
+ "content": " and "
+ },
+ {
+ "bbox": [
+ 313,
+ 281,
+ 559,
+ 402
+ ],
+ "type": "inline_equation",
+ "content": "f_{t+2}"
+ },
+ {
+ "bbox": [
+ 313,
+ 281,
+ 559,
+ 402
+ ],
+ "type": "text",
+ "content": ". 
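A sketch of Eq. (11) and of the scan loop summarized by Eq. (12) is given below; `compute_overlap` is a hypothetical helper chaining the ORB matching, homography estimation, and corner projection sketched above, and using the frame area w*h as Area_total is an assumption, since the paper does not define the denominator explicitly.

```python
from shapely.geometry import Polygon

def overlap_ratio(corners_t, corners_t1_proj, w, h):
    """Sketch of Eq. (11): intersection of L_t and L'_{t+1} over a total area."""
    inter = Polygon(corners_t).intersection(Polygon(corners_t1_proj)).area
    return inter / float(w * h)  # assumed: Area_total = frame area

def extract_keyframes(frames, eps, w, h):
    """Sketch of Eq. (12), f' = K-Extractor(f)."""
    keyframes = [0]  # the first frame seeds the scan
    t = 0            # index of the current reference key-frame
    for j in range(1, len(frames)):
        # compute_overlap is hypothetical: ORB match -> homography -> Eq. (11).
        c = compute_overlap(frames[t], frames[j], w, h)
        if c < eps:  # low overlap indicates significant visual change
            keyframes.append(j)
            t = j    # the new key-frame becomes the reference
    return keyframes
```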
This process continues until a new key-frame is identified, which then becomes the reference for subsequent frames. Regarding the effect of viewpoint changes, rotations (both horizontal and vertical) result in larger field-of-view variations, so more frames are recorded during these movements. If the indices of the extracted keyframes are denoted as "
+ },
+ {
+ "bbox": [
+ 313,
+ 281,
+ 559,
+ 402
+ ],
+ "type": "inline_equation",
+ "content": "\\mathbf{f}' = [f_{k_0}, f_{k_1}, \\dots, f_{k_n}]"
+ },
+ {
+ "bbox": [
+ 313,
+ 281,
+ 559,
+ 402
+ ],
+ "type": "text",
+ "content": ", the keyframe extraction process can be summarized as:"
+ }
+ ]
+ }
+ ],
+ "index": 14
+ },
+ {
+ "bbox": [
+ 398,
+ 408,
+ 558,
+ 419
+ ],
+ "type": "interline_equation",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 398,
+ 408,
+ 558,
+ 419
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 398,
+ 408,
+ 558,
+ 419
+ ],
+ "type": "interline_equation",
+ "content": "\\mathbf{f}^{\\prime} = \\text{K-Extractor}(\\mathbf{f}). \\tag{12}",
+ "image_path": "3bcfbaeaa5a6a8192aac0e4b3e9daf48ccbad2134dfc27424815135ee806a7fd.jpg"
+ }
+ ]
+ }
+ ],
+ "index": 15
+ },
+ {
+ "bbox": [
+ 315,
+ 432,
+ 480,
+ 445
+ ],
+ "type": "title",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 315,
+ 432,
+ 480,
+ 445
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 315,
+ 432,
+ 480,
+ 445
+ ],
+ "type": "text",
+ "content": "A.3 Details of Data Preparation"
+ }
+ ]
+ }
+ ],
+ "index": 16
+ },
+ {
+ "bbox": [
+ 313,
+ 446,
+ 558,
+ 490
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 313,
+ 446,
+ 558,
+ 490
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 313,
+ 446,
+ 558,
+ 490
+ ],
+ "type": "text",
+ "content": "A.3.1 Task Selection Criteria. In our study, we carefully selected specific tasks that emphasize spatial reasoning capabilities during motion within three-dimensional physical space. The selection process was guided by several key considerations:"
+ }
+ ]
+ }
+ ],
+ "index": 17
+ },
+ {
+ "bbox": [
+ 313,
+ 491,
+ 559,
+ 589
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 313,
+ 491,
+ 559,
+ 589
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 313,
+ 491,
+ 559,
+ 589
+ ],
+ "type": "text",
+ "content": "Focus on Reasoning Processes: We prioritized tasks that require deep cognitive processing rather than simple recognition or recall. As highlighted in the main text, embodied spatial reasoning involves complex spatio-temporal relationships where agents must discover object associations across frames and extract task-relevant semantics. For instance, navigation tasks require agents to infer their location from historical observations, construct mental maps, develop high-level plans, and determine specific actions—processes that demand sophisticated reasoning capabilities."
+ }
+ ]
+ }
+ ],
+ "index": 18
+ },
+ {
+ "bbox": [
+ 313,
+ 589,
+ 559,
+ 654
+ ],
+ "type": "text",
+ "angle": 0,
+ "lines": [
+ {
+ "bbox": [
+ 313,
+ 589,
+ 559,
+ 654
+ ],
+ "spans": [
+ {
+ "bbox": [
+ 313,
+ 589,
+ 559,
+ 654
+ ],
+ "type": "text",
+ "content": "Diversity in Spatial Contexts: To ensure comprehensive evaluation, we selected tasks from both indoor (VSI-Bench) and outdoor (UrbanVideo-Bench) environments, providing diverse spatial contexts that test different aspects of embodied reasoning. This diversity is crucial for evaluating the generalizability of our approach across varying spatial scales and environmental complexities."
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 313, + 655, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 655, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 655, + 559, + 710 + ], + "type": "text", + "content": "Emphasis on Long Reasoning Chains: We specifically targeted tasks characterized by long spatial reasoning chains and historically low accuracy rates. These challenging tasks better demonstrate the value of our \"slow thinking\" approach, which encourages thorough reasoning before generating responses—similar to how" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 96, + 115, + 251, + 237 + ], + "blocks": [ + { + "bbox": [ + 50, + 83, + 295, + 105 + ], + "lines": [ + { + "bbox": [ + 50, + 83, + 295, + 105 + ], + "spans": [ + { + "bbox": [ + 50, + 83, + 295, + 105 + ], + "type": "text", + "content": "Table 4: Hyperparameters used in reinforcement learning training of Embodied-R." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 96, + 115, + 251, + 237 + ], + "lines": [ + { + "bbox": [ + 96, + 115, + 251, + 237 + ], + "spans": [ + { + "bbox": [ + 96, + 115, + 251, + 237 + ], + "type": "table", + "html": "
<table><tr><th>Hyperparameter</th><th>Value</th></tr>
<tr><td>Optimizer</td><td>AdamW</td></tr>
<tr><td>Learning Rate</td><td>5e-7</td></tr>
<tr><td>Temperature</td><td>1.0</td></tr>
<tr><td>Train Batch Size</td><td>32</td></tr>
<tr><td>Rollout Size</td><td>8</td></tr>
<tr><td>KL Coefficient</td><td>0.001</td></tr>
<tr><td>Maximum Response Length</td><td>2048</td></tr>
<tr><td>Input Length</td><td>6144</td></tr>
<tr><td>Training Epochs</td><td>12</td></tr></table>
", + "image_path": "7ccac7711d85d0b54aad2b7e8ab68869f72c239a6fc670904f642d5397345cc2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 50, + 249, + 295, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 249, + 295, + 270 + ], + "spans": [ + { + "bbox": [ + 50, + 249, + 295, + 270 + ], + "type": "text", + "content": "recent advances in mathematical and scientific reasoning have benefited from reinforcement learning techniques." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 271, + 295, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 271, + 295, + 316 + ], + "spans": [ + { + "bbox": [ + 50, + 271, + 295, + 316 + ], + "type": "text", + "content": "Deterministic Evaluation: All selected tasks were formulated as multiple-choice question-answering problems to ensure determinism in answers, facilitating both RL training and direct calculation of accuracy for performance evaluation." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 320, + 294, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 320, + 294, + 353 + ], + "spans": [ + { + "bbox": [ + 50, + 320, + 294, + 353 + ], + "type": "text", + "content": "A.3.2 Question Filtering Methodology. To ensure the quality and validity of our dataset, we implemented a rigorous question filtering process:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 354, + 295, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 354, + 295, + 418 + ], + "spans": [ + { + "bbox": [ + 50, + 354, + 295, + 418 + ], + "type": "text", + "content": "Blind Testing Filter: We first evaluated questions using an untrained 7B language model without video input (blind selection). Questions that could be correctly answered without visual information were identified as potentially problematic, as they might rely more on textual patterns or common knowledge rather than genuine spatial reasoning based on video content." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 419, + 295, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 419, + 295, + 442 + ], + "spans": [ + { + "bbox": [ + 50, + 419, + 295, + 442 + ], + "type": "text", + "content": "SFT-based Filtering: After conducting supervised fine-tuning (SFT) without video inputs, we analyzed which question types" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 84, + 559, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 84, + 559, + 139 + ], + "spans": [ + { + "bbox": [ + 313, + 84, + 559, + 139 + ], + "type": "text", + "content": "showed significant improvement in accuracy. Categories where the model's performance increased substantially without visual information were flagged for removal, as this indicated strong correlations between question text and answers that could be exploited without actual spatial reasoning." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 314, + 140, + 559, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 140, + 559, + 160 + ], + "spans": [ + { + "bbox": [ + 314, + 140, + 559, + 160 + ], + "type": "text", + "content": "Correlation Analysis: We specifically eliminated question types where:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 334, + 163, + 558, + 229 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 334, + 163, + 558, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 334, + 163, + 558, + 184 + ], + "spans": [ + { + "bbox": [ + 334, + 163, + 558, + 184 + ], + "type": "text", + "content": "- The model could achieve high accuracy without accessing video content" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 334, + 185, + 558, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 334, + 185, + 558, + 206 + ], + "spans": [ + { + "bbox": [ + 334, + 185, + 558, + 206 + ], + "type": "text", + "content": "- Performance improved dramatically after text-only SFT training" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 334, + 207, + 558, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 334, + 207, + 558, + 229 + ], + "spans": [ + { + "bbox": [ + 334, + 207, + 558, + 229 + ], + "type": "text", + "content": "- Question-answer pairs exhibited strong textual patterns that could be exploited without spatial understanding" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 314, + 231, + 559, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 231, + 559, + 297 + ], + "spans": [ + { + "bbox": [ + 314, + 231, + 559, + 297 + ], + "type": "text", + "content": "This filtering methodology ensured that our final dataset genuinely tests embodied spatial reasoning capabilities rather than linguistic pattern matching or prior knowledge exploitation. By removing questions with strong text-answer correlations, we created a more challenging and valid benchmark that requires models to truly understand spatial relationships from video content." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 315, + 306, + 449, + 319 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 306, + 449, + 319 + ], + "spans": [ + { + "bbox": [ + 315, + 306, + 449, + 319 + ], + "type": "text", + "content": "A.4 RL Hyperparameters" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 320, + 559, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 320, + 559, + 441 + ], + "spans": [ + { + "bbox": [ + 313, + 320, + 559, + 441 + ], + "type": "text", + "content": "The reinforcement learning (RL) training of Embodied-R requires careful hyperparameter tuning to balance computational efficiency with model performance. We conducted extensive experiments to determine the optimal configuration for our collaborative framework. The key hyperparameters used in our RL training process are summarized in Table 4. These settings were selected to ensure stable training while maximizing the model's embodied reasoning capabilities. Notably, we used a relatively small learning rate (5e-7) to prevent catastrophic forgetting and a moderate KL coefficient (0.001) to maintain proximity to the reference model while allowing sufficient exploration." 
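For reference, the settings of Table 4 can be collected into a plain mapping as below; the key names and the dict form are illustrative only, since the paper does not specify the configuration format of its training framework.

```python
# Illustrative mirror of Table 4 (key names are assumptions).
RL_HYPERPARAMS = {
    "optimizer": "AdamW",
    "learning_rate": 5e-7,      # small, to prevent catastrophic forgetting
    "temperature": 1.0,
    "train_batch_size": 32,
    "rollout_size": 8,
    "kl_coefficient": 0.001,    # moderate proximity to the reference model
    "max_response_length": 2048,
    "input_length": 6144,
    "training_epochs": 12,
}
```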
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13958/a08029cb-87cf-4923-9173-7614aa70a4ec_content_list.json b/data/2025/2504_13xxx/2504.13958/a08029cb-87cf-4923-9173-7614aa70a4ec_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..fd309abf77f43fe8797c7e5f09bd8070d522fbe9 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/a08029cb-87cf-4923-9173-7614aa70a4ec_content_list.json @@ -0,0 +1,3389 @@ +[ + { + "type": "text", + "text": "ToolRL: Reward is All Tool Learning Needs", + "text_level": 1, + "bbox": [ + 270, + 90, + 727, + 111 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Cheng Qian, Emre Can Acikgoz, Qi He, Hongru Wang, Xiusi Chen, Dilek Hakkani-Tür, Gokhan Tur, Heng Ji", + "text_level": 1, + "bbox": [ + 208, + 139, + 794, + 173 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "University of Illinois Urbana-Champaign", + "bbox": [ + 331, + 174, + 665, + 189 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{chengq9, hengji}@illinois.edu", + "bbox": [ + 347, + 190, + 650, + 206 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 260, + 260, + 339, + 275 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Current Large Language Models (LLMs) often undergo supervised fine-tuning (SFT) to acquire tool use capabilities. However, SFT struggles to generalize to unfamiliar or complex tool use scenarios. Recent advancements in reinforcement learning (RL), particularly with R1-like models, have demonstrated promising reasoning and generalization abilities. Yet, reward design for tool use presents unique challenges: multiple tools may be invoked with diverse parameters, and coarse-grained reward signals, such as answer matching, fail to offer the fine-grained feedback required for effective learning. In this work, we present the first comprehensive study on reward design for tool selection and application tasks within the RL paradigm. We systematically explore a wide range of reward strategies, analyzing their types, scales, granularity, and temporal dynamics. Building on these insights, we propose a principled reward design tailored for tool use tasks and apply it to train LLMs using Group Relative Policy Optimization (GRPO). Empirical evaluations across diverse benchmarks demonstrate that our approach yields robust, scalable, and stable training, achieving a $17\\%$ improvement over base models and a $15\\%$ gain over SFT models. These results highlight the critical role of thoughtful reward design in enhancing the tool use capabilities and generalization performance of LLMs. All the code are released to facilitate future research. $^{1}$", + "bbox": [ + 141, + 290, + 460, + 744 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 114, + 760, + 258, + 776 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advances in Large Language Models (LLMs) have showcased remarkable capabilities in complex reasoning tasks (Kumar et al., 2025). 
Among the techniques that have significantly contributed to this progress, Reinforcement Learning (RL) has emerged as a powerful paradigm, enabling", + "bbox": [ + 112, + 787, + 489, + 885 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Task Goal: Irrelevant Tool Detection (LLM should reject in appropriate tools)", + "text_level": 1, + "bbox": [ + 549, + 261, + 845, + 272 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/5f522180c398c809905b7e102f535a0a6044a6775b228ce7b2816e62b8244824.jpg", + "image_caption": [ + "Figure 1: SFT on distilled deep-thinking trajectories suffers from overthinking and limited generalization." + ], + "image_footnote": [], + "bbox": [ + 534, + 274, + 557, + 291 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "What's the distance between San Francisco and Los Angeles in kilometers?", + "text_level": 1, + "bbox": [ + 564, + 278, + 855, + 288 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{Name: get_date, Parameters: {location_1: str, location_2: str, unit: str}}", + "text_level": 1, + "bbox": [ + 544, + 294, + 823, + 304 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "SFT Model", + "text_level": 1, + "bbox": [ + 534, + 313, + 581, + 321 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "(Distilled from R1 Long Thought)", + "text_level": 1, + "bbox": [ + 534, + 321, + 667, + 329 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": " The user wants to find the distance in kilometers between San Francisco and Los Angeles. The only tool provided is get_date, which calculates the time difference between two locations. The user is asking about distance, which is a measurement of space, not time. But wait, maybe there's a", + "bbox": [ + 534, + 331, + 727, + 375 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "space, not often. But, maybe it's a misunderstanding Over-Interpretation! perhaps", + "bbox": [ + 534, + 375, + 717, + 384 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "the tool can still be used if possible. Maybe the tool's parameters are misnamed, and it actually", + "bbox": [ + 534, + 384, + 707, + 399 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "returns distance. Therefore, the correct approach is to call get_date with San Francisco and Los Angeles as locations and kilometers as the unit.", + "bbox": [ + 534, + 399, + 727, + 422 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": " {Name: get date, Parameters:", + "bbox": [ + 534, + 424, + 702, + 432 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "location 1: San Francisco, location 2: Los", + "bbox": [ + 534, + 432, + 702, + 439 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$\\text{Angeles,unit:kilometers}\\} \\} < /$ tool_call>", + "bbox": [ + 534, + 439, + 687, + 449 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "RL Model", + "text_level": 1, + "bbox": [ + 742, + 313, + 784, + 321 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "(Trained through GRPO)", + "text_level": 1, + "bbox": [ + 742, + 321, + 842, + 329 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": " The user wants to find the distance between San Francisco and Los Angeles in kilometers. 
However, the available tool 'get_date' is not suitable for calculating", + "bbox": [ + 742, + 329, + 853, + 375 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "distances.", + "text_level": 1, + "bbox": [ + 742, + 375, + 818, + 384 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The tool get_date'is not suitable for calculating the distance between two cities.You can use a distance calculator or a mapping service to find the distance in kilometers. ", + "bbox": [ + 742, + 384, + 850, + 448 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "LLMs to develop emergent capabilities such as self-reflection, self-correction, and long-horizon planning (Guo et al., 2025; Team et al., 2025). These capabilities have been instrumental in the success of models like o1 and R1, particularly in mathematical and logical reasoning domains (Qin et al., 2024a; Huang et al., 2024; Li et al., 2025b; Kang et al., 2025).", + "bbox": [ + 507, + 518, + 884, + 646 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Beyond traditional reasoning tasks, an increasingly important area is Tool-Integrated Reasoning (TIR). TIR involves LLMs interacting with external tools, such as search engines (Jin et al., 2025; Zheng et al., 2025), calculators (Chen et al., 2023b; Qin et al., 2023), or code interpreters (Gou et al., 2023; Liao et al., 2024), in a multi-step, feedback-driven loop to arrive at solutions. TIR is particularly important because it addresses core limitations of LLMs, such as outdated knowledge, calculation inaccuracy, and shallow reasoning. By integrating external tools that offer real-time access and specialized capabilities, TIR enables models to tackle complex tasks in a more grounded and goal-directed way.", + "bbox": [ + 507, + 646, + 884, + 888 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Unlike textual reasoning, which primarily involves deduction and inference from static text,", + "bbox": [ + 507, + 889, + 882, + 919 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.13958v1 [cs.LG] 16 Apr 2025", + "bbox": [ + 21, + 306, + 60, + 722 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1 Data and codes released at https://github.com/qiancheng@/ToolRL", + "bbox": [ + 112, + 894, + 487, + 919 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/d9d1c4a75b58af36c4b5eb4342807cfac4846cca8d8a8fc7880fdfaabb685a3c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 115, + 82, + 489, + 160 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/5420e28d1d217f54de626224d6cf029c9d3c8b75190b3803174eadbd95a48981.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 115, + 160, + 489, + 229 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/52af26f9a24c98cd2e0c9763ebf10c2cc361be5015d3ce094e8fa5770096f803.jpg", + "image_caption": [ + "Figure 2: Main results (left) and reward trends over training steps for GRPO Cold Start across four models (right). GRPO Cold Start, equipped with our proposed reward design, consistently achieves the highest performance, with reward curves showing a rapid increase during training." 
+ ], + "image_footnote": [], + "bbox": [ + 115, + 229, + 489, + 329 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/f9ea052946be1f03ba66dd491116a8bea9bad8a566a674459368a7eceb182187.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 499, + 93, + 882, + 193 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/c3a015e002e06dde8110a53dbf637aed44e0ebca22ae6f9b69351fadc73cf144.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 497, + 217, + 878, + 316 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "TIR additionally demands the model's ability to select appropriate tools, interpret intermediate outputs, and adaptively refine its trajectory on the fly. These dynamic and interactive reasoning skills position TIR at the core of the emerging paradigm of LLMs-as-agents. As such, TIR enables a wide range of applications, including scientific discovery (Roohani et al., 2024; Inoue et al., 2024), research automation (Baek et al., 2024; Wang et al., 2024), embodied task completion (Zhang et al., 2023; Huang et al., 2023), and everyday decision-making (Ye et al., 2023; Zhai et al., 2024).", + "bbox": [ + 112, + 407, + 489, + 602 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Training LLMs for TIR tasks has predominantly relied on Supervised Fine-Tuning (SFT), wherein existing approaches typically generate these integrated reasoning steps offline, followed by subsequent SFT on these trajectories (Chen et al., 2023a; Zeng et al., 2024; Chen et al., 2024; Acikgoz et al., 2025). While SFT is effective to some extent, it struggles with generalization, exploration, and adaptability (Chu et al., 2025; Guo et al., 2025). As illustrated in Figure 1, a model trained with SFT on deep-thinking trajectories over-interprets the tool and fails to reject the inappropriate tool, merely imitating cues like \"but wait\" without engaging in genuine deep thinking. As such, SFT often fails to capture the strategic flexibility needed for optimal tool use, particularly in open-ended or multi-step settings. This motivates a fundamental research question: Can RL-based training methods better equip LLMs with agentic tool-using capabilities,", + "bbox": [ + 112, + 615, + 489, + 921 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "and if so, what is the optimal RL design for TIR?", + "bbox": [ + 507, + 409, + 873, + 425 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recent efforts such as Search-R1 (Jin et al., 2025) and TORL (Li et al., 2025b) have begun to explore this direction. However, their focus is narrow: either constrained to search tools in question answering settings or code tools in math problem-solving. In contrast, our work aims to study RL-based training for general-purpose tool selection and application, across diverse and complex tool sets with different task types.", + "bbox": [ + 507, + 429, + 884, + 574 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "For an RL algorithm to be effective, a well-designed reward is essential. Unlike math tasks with a single correct answer, Tool-Integrated Reasoning (TIR) tasks introduce multiple layers of complexity: they often involve multi-step interactions where each turn may require invoking multiple tools, each with carefully specified parameters. Designing effective reward signals to guide learning through this complexity remains an open and underexplored challenge. 
In this paper, we focus on the problem of reward design for TIR and propose a principled, generalizable framework that can be applied across various RL algorithms. While our reward design is algorithm-agnostic by nature, we empirically demonstrate its effectiveness using both Group Relative Policy Optimization (GRPO) (Shao et al., 2024) and Proximal Policy Optimization (PPO) (Schulman et al., 2017), showcasing its versatility and impact on improving tool use performance.", + "bbox": [ + 507, + 577, + 884, + 901 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We begin by formalizing the TIR task, and out", + "bbox": [ + 527, + 904, + 884, + 921 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "lining general principles for effective reward design. Building on this foundation, we show how RL algorithm can be leveraged to train LLMs for robust and context-aware tool selection and application. Empirical results demonstrate that our approach outperforms base models by $17\\%$ and SFT models by $15\\%$ across multiple tool use and QA benchmarks. Moreover, the trained model exhibits strong generalization to unseen scenarios and task objectives, along with emergent behaviors such as proactiveness and metacognitive reasoning.", + "bbox": [ + 112, + 84, + 489, + 261 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To identify optimal reward strategies, we next systematically explore a broad spectrum of reward configurations across four key dimensions: (1) reward type (what aspect to reward), (2) reward scale (how much to reward), (3) reward granularity (how detailed the reward signal is), and (4) reward dynamics (how rewards evolve over time). Through extensive experiments, we identify reward designs that best align with agentic tool use and uncover insights into what makes a reward \"useful\" for tool invoking LLMs. We summarize the core insights we derive as follows:", + "bbox": [ + 115, + 262, + 489, + 453 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Longer reasoning trace is not inherently better and length rewards can degrade performance.", + "- Dynamic reward scale helps models transition smoothly from simple to complex behaviors.", + "- Finegrained reward decomposition leads to more stable and effective learning." + ], + "bbox": [ + 115, + 457, + 485, + 560 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We also summarize the overall contributions of our paper as follows:", + "bbox": [ + 112, + 564, + 487, + 596 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We present the first systematic study on RL-based training for general-purpose tool selection and application in LLMs.", + "- We propose a principled reward design framework tailored for TIR and validate its effectiveness through RL algorithms including GRPO.", + "- We conduct extensive experiments analyzing the effects of various reward strategies and distill actionable insights for future research on LLM-agent training." 
+ ], + "bbox": [ + 115, + 599, + 487, + 766 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This work pioneers the application of RL to general TIR and provides the first empirical roadmap for reward design in TIR, paving the way toward more capable and autonomous LLM agents.", + "bbox": [ + 112, + 769, + 487, + 834 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 112, + 846, + 270, + 862 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Tool-Integrated Reasoning of LLMs. Tool-integrated reasoning (TIR) has emerged as a promising approach to enhance the capabilities of", + "bbox": [ + 112, + 872, + 489, + 921 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "LLMs. Early studies introduced the concept of equipping LLMs with external tools to overcome their inherent limitations (Schick et al., 2023; Qin et al., 2024b; Yao et al., 2023), such as program executors (Chen et al., 2022) and search engines (Vu et al., 2023). To systematically assess these enhanced capabilities, several benchmarks have been proposed to evaluate tool use performance across various dimensions, including API selection, argument generation, and generalization (Qin et al., 2024c; Patil et al., 2023; Qian et al., 2024a). Building on this foundation, subsequent research has focused on constructing high-quality tool use datasets (Liu et al., 2024; Qian et al., 2025), enabling models to autonomously create and invoke tools (Qian et al., 2023, 2024b), and applying these techniques to problems spanning different modalities (Shen et al., 2025) and specialized domains (Ling et al., 2023). More recently, reinforcement learning (RL) has been explored as an effective framework to further improve TIR, demonstrating success in tasks such as information retrieval (Jin et al., 2025) and math computation (Li et al., 2025b). These advances collectively highlight the growing potential of tool-augmented LLMs for general-purpose reasoning in open-domain settings.", + "bbox": [ + 507, + 84, + 885, + 502 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Exploration of RL in LLMs. Previous work has primarily relied on supervised fine-tuning (SFT) with carefully curated datasets to enhance LLM performance in tool use (Schick et al., 2023; Qin et al., 2024c). Recently, reinforcement learning (RL) has gained traction as a more scalable and generalizable training paradigm. The development of RL methods for LLMs has evolved from reinforcement learning from human feedback (RLHF) (Kaufmann et al., 2023) and proximal policy optimization (PPO) (Schulman et al., 2017) to more advanced techniques such as direct preference optimization (DPO) (Rafailov et al., 2023), SimPO (Meng et al., 2024), and group relative policy optimization (GRPO) (Shao et al., 2024). Extensions like dynamic sampling policy optimization (DAPO) (Yu et al., 2025) and the more recent value-based augmented proximal policy optimization (VAPO) (Yuan et al., 2025) further improve training stability and efficiency.", + "bbox": [ + 507, + 517, + 885, + 839 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Among these, GRPO (Shao et al., 2024) is specifically designed for LLMs, replacing the traditional critic with a group-based evaluation strategy. 
It has shown strong performance in enhancing reasoning abilities across a range of tasks, including math-", + "bbox": [ + 507, + 841, + 885, + 921 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "ematical problem solving (Shao et al., 2024; Xie et al., 2025), search engine interaction (Jin et al., 2025; Song et al., 2025), and code generation (Li et al., 2025b). Beyond task variety, recent studies have analyzed the influence of dataset scale (Li et al., 2025a) and GRPO's effectiveness in smaller model settings (Dang and Ngo, 2025). GRPO's flexible reward function enables adaptation to diverse objectives, such as assigning weights to subtasks (Yu et al., 2024) or constraining tool use frequency (Li et al., 2025b). In this work, we extend GRPO to enhance general tool use capabilities, improving LLMs' ability to select and interact with external tools across a wide range of scenarios.", + "bbox": [ + 112, + 84, + 489, + 309 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 Method", + "text_level": 1, + "bbox": [ + 112, + 321, + 218, + 336 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Supervised fine-tuning (SFT), as illustrated in Figure 1, often suffers from overfitting to certain patterns and constrains the model's ability to learn optimal strategies for tool use. To address this, we introduce a reinforcement learning (RL) approach for enhancing tool-integrated reasoning (TIR) in LLMs. In this section, we begin by defining the TIR task (Section 3.1), followed by our customized rollout strategy (Section 3.2) and reward design (Section 3.3). These components are then integrated into the Group Relative Policy Optimization (GRPO) framework (Shao et al., 2024) to guide model training on general TIR tasks (Section 3.4).", + "bbox": [ + 112, + 347, + 489, + 556 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Task Definition", + "text_level": 1, + "bbox": [ + 112, + 567, + 280, + 581 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Tool-Integrated Reasoning (TIR) is the process of incorporating external tools into the reasoning trajectory of an LLM to solve a user task. 
A typical TIR trajectory involves multiple tool invocations over several reasoning steps, with the final outcome determined by the cumulative success of these intermediate decisions.", + "bbox": [ + 112, + 588, + 489, + 699 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Formally, given a tool set $\\mathcal{T} = \\{t_1,t_2,\\dots ,t_n\\}$ containing $n$ available tools, and a user query $Q$ the reasoning trajectory up to step $k$ is denoted as:", + "bbox": [ + 112, + 701, + 489, + 750 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\ns _ {k} = (r _ {1}, \\mathcal {T} _ {1}, o _ {1}), (r _ {2}, \\mathcal {T} _ {2}, o _ {2}), \\ldots , (r _ {k}, \\mathcal {T} _ {k}, o _ {k}),\n$$\n", + "text_format": "latex", + "bbox": [ + 121, + 762, + 478, + 780 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $r_i$ denotes the model's natural language reasoning at step $i$ , $\\mathcal{T}_i \\subseteq \\mathcal{T}$ denotes the set of tool calls invoked at step $i$ , and $o_i$ denotes the observation received after executing tools in $\\mathcal{T}_i$ , possibly including both environment and user feedback.", + "bbox": [ + 112, + 791, + 487, + 872 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "At each step $k + 1$ , the model must generate the next reasoning step $r_{k + 1}$ , select a set of tools $\\mathcal{T}_{k + 1} \\subseteq \\mathcal{T}$ , and formulate a grounded tool call (i.e.,", + "bbox": [ + 112, + 873, + 489, + 921 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "a parameterized invocation of each tool) to make progress toward solving $Q$ .", + "bbox": [ + 507, + 84, + 880, + 116 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The model's policy is defined as $\\pi : s_k \\to (r_{k+1}, \\mathcal{T}_{k+1})$ , where the model's objective at each step is to select a tool set $\\mathcal{T}_{k+1}$ that maximizes the immediate reward:", + "bbox": [ + 507, + 117, + 880, + 179 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {T} _ {k + 1} ^ {*} = \\arg \\max _ {\\mathcal {T} _ {k + 1} \\subseteq \\mathcal {T}} R (s _ {k}, \\mathcal {T} _ {k + 1}, o _ {k + 1}),\n$$\n", + "text_format": "latex", + "bbox": [ + 546, + 193, + 845, + 217 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $R(\\cdot)$ represents the reward function that evaluates progress made by invoking the tools in $\\mathcal{T}_{k + 1}$ .", + "bbox": [ + 507, + 230, + 884, + 262 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "While the immediate reward at each step is maximized, the model's policy is implicitly optimized to maximize the cumulative reward over the entire trajectory, formulated as:", + "bbox": [ + 507, + 263, + 882, + 326 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\max _ {\\pi} \\mathbb {E} _ {\\pi} \\left[ \\sum_ {k = 1} ^ {K} R \\left(s _ {k}, \\mathcal {T} _ {k + 1}, o _ {k + 1}\\right) \\right],\n$$\n", + "text_format": "latex", + "bbox": [ + 561, + 337, + 828, + 381 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This formulation is valid because our training data includes ground truth tool calls at each step, allowing step-wise reward signals to guide multi-step success. Unlike QA tasks that focus solely on the final answer, tool selection and application tasks provide dense intermediate feedback. 
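As one possible concrete encoding of this formulation, a trajectory can be represented as below; the class and field names are illustrative assumptions, since the paper defines the tuples abstractly.

```python
from dataclasses import dataclass, field
from typing import Any

@dataclass
class TIRStep:
    """One step (r_i, T_i, o_i) of a tool-integrated reasoning trajectory."""
    reasoning: str                    # r_i: natural-language reasoning
    tool_calls: list[dict[str, Any]]  # T_i: e.g. {"name": ..., "parameters": {...}}
    observation: str                  # o_i: environment and/or user feedback

@dataclass
class TIRTrajectory:
    """s_k = (r_1, T_1, o_1), ..., (r_k, T_k, o_k) for a user query Q."""
    query: str
    steps: list[TIRStep] = field(default_factory=list)
```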
Moreover, we later demonstrate that our method enables the model to generalize to settings where tool calls are free-form and only the final outcome matters. Therefore, our task setting encourages the model to optimize tool use at each step while aligning with the overall task goal.",
+ "bbox": [
+ 507,
+ 391,
+ 884,
+ 583
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "text",
+ "text": "3.2 TIR Rollout",
+ "text_level": 1,
+ "bbox": [
+ 507,
+ 594,
+ 653,
+ 608
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "text",
+ "text": "To enable the model to autonomously generate reasoning traces and tool calls, we utilize a system prompt as shown in Figure 4 during rollout. The Tool List placeholder denotes the tool set $\mathcal{T}$ , which contains all tools available for invocation. We indicate in the instruction that the LLM should use the special tokens <think>, <tool_call>, and <response> to indicate their thoughts, tool calls, and responses in the output.",
+ "bbox": [
+ 507,
+ 615,
+ 882,
+ 759
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "text",
+ "text": "As illustrated in Figure 3, when the model output includes <tool_call>, we automatically parse the tool calls into individual invocations using the model-predicted parameters. The outputs from executions are then inserted into the <obs> field and appended to the dialogue history, whose format is shown in Figure 12, serving as the model's interaction trajectory. Similarly, if the output contains <response>, the corresponding response is parsed and appended to the dialogue history.",
+ "bbox": [
+ 507,
+ 760,
+ 884,
+ 921
+ ],
+ "page_idx": 3
+ },
+ {
+ "type": "image",
+ "img_path": "images/68ae308fe4201a034cd5d0b2d3ae170be90697a5c0d2f6f3f221fa3f622dbd0d.jpg",
+ "image_caption": [
+ "Figure 3: Illustration of TIR rollout and calculation of format and correctness reward."
+ ],
+ "image_footnote": [],
+ "bbox": [
+ 115,
+ 80,
+ 884,
+ 272
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "It is important to note that <tool_call> and <response> are not mutually exclusive; they may co-occur within a single output. The user's initial query $Q$ is placed in the Initial User Input placeholder, and any subsequent user inputs are also appended to the dialogue history when present.",
+ "bbox": [
+ 112,
+ 325,
+ 489,
+ 420
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "3.3 Reward Design",
+ "text_level": 1,
+ "bbox": [
+ 112,
+ 434,
+ 280,
+ 450
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "Rule-based reward mechanisms have demonstrated strong empirical performance and are commonly employed. In our training, we similarly adopt a reward formulation that combines structural and correctness-based components, in line with prior works (Jin et al., 2025; Li et al., 2025b; Xie et al., 2025). Specifically, the format reward assesses whether the model output adheres to the expected structure including thoughts, tool calls, and responses, while the correctness reward evaluates the accuracy of tool invocations. Formally, the overall reward $R_{\mathrm{final}}(\cdot)$ is decomposed into two components: $R_{\mathrm{format}} + R_{\mathrm{correct}}$ , each described in detail below:",
+ "bbox": [
+ 112,
+ 457,
+ 489,
+ 680
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "Format Reward. 
The format reward $\mathcal{R}_{\mathrm{format}} \in \{0,1\}$ checks whether the model output contains all required special tokens in the correct order as specified by the ground truth:",
+ "bbox": [
+ 112,
+ 694,
+ 487,
+ 758
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\mathcal{R}_{\\text{format}} = \\left\\{ \\begin{array}{ll} 1, & \\text{if all required fields appear} \\\\ & \\text{and are in the correct order} \\\\ 0, & \\text{otherwise} \\end{array} \\right.\n$$\n",
+ "text_format": "latex",
+ "bbox": [
+ 112,
+ 785,
+ 443,
+ 843
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "Correctness Reward. The correctness reward $\mathcal{R}_{\mathrm{correct}} \in [-3, 3]$ evaluates predicted tool calls $P = \{P_1, \dots, P_m\}$ against ground-truth calls $G = \{G_1, \dots, G_n\}$ . It includes three components:",
+ "bbox": [
+ 112,
+ 857,
+ 487,
+ 921
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "- Tool Name Matching:",
+ "bbox": [
+ 510,
+ 325,
+ 690,
+ 340
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\nr_{\\text{name}} = \\frac{\\left| N_{G} \\cap N_{P} \\right|}{\\left| N_{G} \\cup N_{P} \\right|} \\in [0, 1]\n$$\n",
+ "text_format": "latex",
+ "bbox": [
+ 613,
+ 349,
+ 794,
+ 378
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "where $N_{G}$ and $N_{P}$ are the sets of tool names extracted from the ground-truth and predicted tool calls, respectively.",
+ "bbox": [
+ 524,
+ 386,
+ 880,
+ 434
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "Parameter Name Matching:",
+ "bbox": [
+ 510,
+ 436,
+ 734,
+ 451
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\nr_{\\text{param}} = \\sum_{G_{j} \\in G} \\frac{| \\mathrm{keys}(P_{G}) \\cap \\mathrm{keys}(P_{P}) |}{| \\mathrm{keys}(P_{G}) \\cup \\mathrm{keys}(P_{P}) |} \\in [0, |G|]\n$$\n",
+ "text_format": "latex",
+ "bbox": [
+ 547,
+ 458,
+ 857,
+ 494
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "where $\mathrm{keys}(P_G)$ and $\mathrm{keys}(P_P)$ represent the parameter names of the predicted and ground-truth tool calls, respectively.",
+ "bbox": [
+ 524,
+ 504,
+ 882,
+ 552
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "Parameter Content Matching:",
+ "bbox": [
+ 510,
+ 554,
+ 749,
+ 569
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\begin{array}{l} r_{\\text{value}} = \\sum_{G_{j} \\in G} \\sum_{k \\in \\mathrm{keys}(G_{j})} \\mathbb{1}\\left[ P_{G}[k] = P_{P}[k] \\right] \\\\ \\quad \\in [0, \\sum_{G_{j} \\in G} | \\mathrm{keys}(G_{j}) |] \\end{array}\n$$\n",
+ "text_format": "latex",
+ "bbox": [
+ 566,
+ 577,
+ 838,
+ 642
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "where $P_{G}[k]$ and $P_{P}[k]$ represent the values of the parameters for the predicted and ground-truth tool calls.",
+ "bbox": [
+ 524,
+ 651,
+ 880,
+ 697
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "- Total match score for each match is:",
+ "bbox": [
+ 510,
+ 701,
+ 794,
+ 715
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\nr_{\\text{match}} = r_{\\text{name}} + r_{\\text{param}} + r_{\\text{value}} \\in [0, S_{\\max}]\n$$\n",
+ "text_format": "latex",
+ "bbox": [
+ 571,
+ 726,
+ 835,
+ 741
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "where 
$S_{\mathrm{max}} = 1 + |G| + \sum_{G_j \in G} |\mathrm{keys}(G_j)|$ denotes the maximum possible score.",
+ "bbox": [
+ 526,
+ 751,
+ 880,
+ 784
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "The total score is computed by finding the optimal matching between $P$ and $G$ to maximize the total match score:",
+ "bbox": [
+ 507,
+ 794,
+ 880,
+ 840
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\mathcal{R}_{\\text{correct}} = 6 \\cdot \\frac{R_{\\max}}{S_{\\max}} - 3 \\in [-3, 3]\n$$\n",
+ "text_format": "latex",
+ "bbox": [
+ 566,
+ 848,
+ 823,
+ 881
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "where $R_{\mathrm{max}}$ denotes the total match score from the optimal matching. The final correctness reward",
+ "bbox": [
+ 507,
+ 889,
+ 880,
+ 921
+ ],
+ "page_idx": 4
+ },
+ {
+ "type": "text",
+ "text": "System Prompt for Training",
+ "text_level": 1,
+ "bbox": [
+ 196,
+ 85,
+ 374,
+ 98
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "You are a helpful dialogue assistant capable of leveraging tool calls to solve user tasks and provide structured chat responses.",
+ "bbox": [
+ 194,
+ 105,
+ 800,
+ 133
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "Available Tools",
+ "text_level": 1,
+ "bbox": [
+ 196,
+ 146,
+ 302,
+ 158
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "In your response, you can use the following tools:",
+ "bbox": [
+ 196,
+ 160,
+ 509,
+ 173
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "{{ToolList}}",
+ "bbox": [
+ 196,
+ 174,
+ 282,
+ 187
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "Steps for Each Turn",
+ "text_level": 1,
+ "bbox": [
+ 196,
+ 200,
+ 332,
+ 212
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "list",
+ "sub_type": "text",
+ "list_items": [
+ "1. Think: Recall relevant context and analyze the current user goal.",
+ "2. Decide on Tool Usage: If a tool is needed, specify the tool and its parameters.",
+ "3. Respond Appropriately: If a response is needed, generate one while maintaining consistency across user queries."
+ ],
+ "bbox": [
+ 196,
+ 214,
+ 800,
+ 268
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "Output Format",
+ "text_level": 1,
+ "bbox": [
+ 196,
+ 281,
+ 302,
+ 294
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "code",
+ "sub_type": "code",
+ "code_caption": [],
+ "code_body": "<think> Your thoughts and reasoning </think>\n<tool_call>\n{“name”: “Tool name”, “parameters”: {“Parameter name”: “Parameter content”, “... ...”: “... ...”}}\n{“name”: “... ...”, “parameters”: {“... ...”: “... ...”, “... ...”: “... ...”}}\n...\n</tool_call>\n<response> AI's final response </response>",
+ "guess_lang": "txt",
+ "bbox": [
+ 194,
+ 294,
+ 800,
+ 390
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "Important Notes",
+ "text_level": 1,
+ "bbox": [
+ 196,
+ 403,
+ 310,
+ 416
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "list",
+ "sub_type": "text",
+ "list_items": [
+ "1. You must always include the <think> field to outline your reasoning. Provide at least one of <tool_call> or <response>. Decide whether to use <tool_call> (possibly multiple times), <response>, or both.",
+ "2. You can invoke multiple tool calls simultaneously in the <tool_call> fields. Each tool call should be a JSON object with a \"name\" field and a \"parameters\" field containing a dictionary of parameters. If no parameters are needed, leave the \"parameters\" field an empty dictionary.",
+ "3. Refer to the previous dialogue records in the history, including the user's queries, previous <think>, <tool_call>, <response>, and any tool feedback noted as <obs> (if exists)."
+ ],
+ "bbox": [
+ 194,
+ 417,
+ 801,
+ 525
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "Figure 4: The system prompt used for TIR's rollout.",
+ "bbox": [
+ 319,
+ 555,
+ 673,
+ 570
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "$\mathcal{R}_{\mathrm{correct}}$ is the normalized reward for the matching process. We empirically set the reward scale within the range of $[-3, 3]$ , with more analysis and ablations of reward scale presented in Section 5.",
+ "bbox": [
+ 112,
+ 596,
+ 487,
+ 659
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "The final reward value $\mathcal{R}_{\mathrm{final}}$ is then derived as the sum of $\mathcal{R}_{\mathrm{format}}$ and $\mathcal{R}_{\mathrm{correct}}$ :",
+ "bbox": [
+ 112,
+ 659,
+ 485,
+ 690
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "equation",
+ "text": "\n$$\n\\mathcal{R}_{\\text{final}} = \\mathcal{R}_{\\text{format}} + \\mathcal{R}_{\\text{correct}} \\in [-3, 4]\n$$\n",
+ "text_format": "latex",
+ "bbox": [
+ 164,
+ 701,
+ 435,
+ 719
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "Unlike prior works that often rely on binary or overly simplified reward signals, our design captures the nuanced structure of tool calls by evaluating multiple interdependent components including tool names, parameter schemas, and parameter values. This fine-grained formulation better reflects the complexity of real-world tool use, where correctness cannot be reduced to a single binary criterion. We further validate the impact of this design through comprehensive analysis in Section 5.",
+ "bbox": [
+ 112,
+ 728,
+ 487,
+ 888
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "Overall, our reward design ensures a balanced and interpretable evaluation signal by explicitly",
+ "bbox": [
+ 112,
+ 889,
+ 487,
+ 921
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "separating structural compliance from semantic correctness. By aligning rewards with both format adherence and fine-grained tool call accuracy, the model is guided to produce outputs that are not only syntactically valid but also semantically faithful, which is crucial for downstream tool execution and final task success.",
+ "bbox": [
+ 507,
+ 596,
+ 884,
+ 706
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "3.4 RL Training with GRPO",
+ "text_level": 1,
+ "bbox": [
+ 507,
+ 728,
+ 752,
+ 745
+ ],
+ "page_idx": 5
+ },
+ {
+ "type": "text",
+ "text": "To tune the model with structured rewards, we employ GRPO, a variant of PPO that introduces advantage normalization within grouped samples. This normalization helps stabilize training by reducing variance across samples that share a common input context. 
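Before formalizing the objective, the correctness reward defined above can be sketched as follows; this simplified version pairs calls greedily by tool name rather than computing the true optimal matching over P and G, so it is an approximation of the paper's procedure rather than its exact implementation.

```python
def correctness_reward(pred_calls, gold_calls):
    """Sketch of R_correct; calls are dicts like {"name": ..., "parameters": {...}}."""
    names_p = {c["name"] for c in pred_calls}
    names_g = {c["name"] for c in gold_calls}
    # r_name: Jaccard overlap of tool names, in [0, 1].
    r_name = len(names_g & names_p) / max(len(names_g | names_p), 1)

    r_param = 0.0
    r_value = 0.0
    s_max = 1 + len(gold_calls)  # S_max = 1 + |G| + total gold parameter count
    by_name = {c["name"]: c.get("parameters", {}) for c in pred_calls}
    for g in gold_calls:
        gp = g.get("parameters", {})
        s_max += len(gp)
        pp = by_name.get(g["name"], {})  # greedy pairing by tool name (simplification)
        # r_param: Jaccard overlap of parameter names per paired call.
        r_param += len(set(gp) & set(pp)) / max(len(set(gp) | set(pp)), 1)
        # r_value: exact-match indicator summed over gold parameter values.
        r_value += sum(1 for k in gp if pp.get(k) == gp[k])

    r_match = r_name + r_param + r_value
    # Rescale the total match score from [0, S_max] into [-3, 3].
    return 6.0 * r_match / s_max - 3.0
```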
Let $\\pi_{\\theta}$ represent the current policy.", + "bbox": [ + 507, + 757, + 884, + 853 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Normalized Advantage Across Query Groups.", + "text_level": 1, + "bbox": [ + 507, + 873, + 884, + 889 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For each query $Q$, its responses derived from the rollout form a group $G_{Q}$ consisting of multiple", + "bbox": [ + 507, + 890, + 882, + 922 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "responses and their corresponding reward values:", + "bbox": [ + 112, + 84, + 480, + 99 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nG_{Q} = \\left\\{ A, (s_{1}, r_{1}), (s_{2}, r_{2}), \\dots, (s_{n}, r_{n}) \\right\\}\n$$\n", + "text_format": "latex", + "bbox": [ + 142, + 115, + 455, + 131 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $A$ denotes the ground-truth annotation for $Q$, and each reward $r_i$ is computed as the sum of the format and correctness rewards associated with response $s_i$, i.e., $r_i = \\mathcal{R}_{\\mathrm{format}}(s_i, A) + \\mathcal{R}_{\\mathrm{correct}}(s_i, A)$. For each group, we calculate the mean and standard deviation of the rewards:", + "bbox": [ + 112, + 147, + 489, + 242 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\n\\mu_{Q} = \\frac{1}{n} \\sum_{i=1}^{n} r_{i}, \\quad \\sigma_{Q} = \\sqrt{\\frac{1}{n} \\sum_{i=1}^{n} (r_{i} - \\mu_{Q})^{2}}\n$$\n", + "text_format": "latex", + "bbox": [ + 156, + 267, + 443, + 307 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Then, for each sample $s_i$ in the group, we define the normalized advantage:", + "bbox": [ + 112, + 321, + 485, + 354 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nA_{i}(s_{i} \\mid Q) = \\frac{r_{i} - \\mu_{Q}}{\\sigma_{Q} + \\eta}\n$$\n", + "text_format": "latex", + "bbox": [ + 231, + 363, + 369, + 390 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\\eta$ is a small constant added to the denominator to avoid division by zero.", + "bbox": [ + 117, + 397, + 468, + 412 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Policy Optimization Objective. The policy $\\pi_{\\theta}$ is optimized using the standard clipped PPO objective, adapted with our group-wise normalized advantages:", + "bbox": [ + 112, + 424, + 489, + 488 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nJ_{\\mathrm{GRPO}}(\\theta) = \\mathbb{E}_{Q \\sim \\mathcal{D}}\\, \\mathbb{E}_{s_{i} \\sim \\pi_{\\theta}} \\left[ \\min\\left( \\frac{\\pi_{\\theta}(s_{i} \\mid Q)}{\\pi_{\\mathrm{old}}(s_{i} \\mid Q)}\\, A_{i}(s_{i} \\mid Q),\\ \\operatorname{clip}\\left( \\frac{\\pi_{\\theta}(s_{i} \\mid Q)}{\\pi_{\\mathrm{old}}(s_{i} \\mid Q)},\\, 1 - \\epsilon,\\, 1 + \\epsilon \\right) A_{i}(s_{i} \\mid Q) \\right) \\right]\n$$\n", + "text_format": "latex", + "bbox": [ + 127, + 511, + 472, + 568 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Unlike the original GRPO formulations, we omit the KL penalty term against a reference model. This design choice encourages the model to more freely adapt its behavior to our custom response format and structured reward signals. 
In practice, we observe that this leads to faster convergence and comparable performance, while also simplifying the training pipeline.", + "bbox": [ + 112, + 581, + 487, + 709 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Overall, this objective guides the policy to generate structurally consistent and semantically accurate tool calls, while group-wise normalization mitigates reward variance across queries, leading to more stable and sample-efficient alignment with task-specific response requirements.", + "bbox": [ + 112, + 711, + 487, + 808 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 112, + 822, + 260, + 838 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1 Training Dataset", + "text_level": 1, + "bbox": [ + 112, + 851, + 292, + 866 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To support robust tool learning through RL, we construct a mixed dataset spanning diverse tool use scenarios:", + "bbox": [ + 112, + 873, + 487, + 920 + ], + "page_idx": 6 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- ToolACE (Liu et al., 2024): A general tool use dataset where the model learns when to invoke tools versus respond directly, improving decision-making in multi-step interactions.", + "- Hammer (Masked) (Lin et al., 2024): A subset of Hammer with randomized tool and parameter names, forcing the model to rely on descriptions rather than memorized labels, thus enhancing generalization and reducing overfitting to certain tools.", + "- xLAM (Zhang et al., 2024): A compositional dataset requiring one or multiple tool calls per turn, encouraging the model to reason about tool dependencies and actively plan diverse tool-calling actions." + ], + "bbox": [ + 510, + 84, + 882, + 330 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "For RL training, we sample 2K examples from ToolACE and 1K each from Hammer and xLAM, creating a balanced dataset spanning diverse levels of complexity and tool use. Multi-step trajectories are decomposed into single-step instances, with prior dialogue history injected into the user prompt (as shown in Figure 12) to preserve context. This setup encourages strategic exploration and teaches the model to select and apply tools appropriately within each step. Please see Appendix B for more details and justifications.", + "bbox": [ + 507, + 334, + 882, + 511 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 Experiment Settings", + "text_level": 1, + "bbox": [ + 507, + 521, + 717, + 536 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Training. We conduct all RL experiments using the veRL framework (Sheng et al., 2024), adopting the GRPO algorithm detailed in the previous section. For each training step, we sample a batch of 512 queries and generate 4 responses per query, training for 15 epochs in total (see Appendix B for full configuration details). To encourage broader policy exploration, we remove KL regularization and apply a generation temperature of 1.0. We initialize our models with the Qwen-2.5-Instruct (Team, 2024) and Llama-3.2-Instruct (Dubey et al., 2024) series, which are further tuned under the GRPO objective with our customized reward design.", + "bbox": [ + 507, + 541, + 882, + 750 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Evaluation. 
We evaluate our approach on the Berkeley Function Call Leaderboard (BFCL) (Patil et al., 2024), a comprehensive benchmark that spans a diverse set of challenges, including single-step reasoning, multi-step tool use, real-time execution, irrelevant tool rejection, simultaneous multi-tool selection, and multi-tool application$^{2}$. In addition, we present results on API-", + "bbox": [ + 507, + 758, + 884, + 887 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "$^{2}$ https://gorilla.cs.berkeley.edu/blogs/13_bfcl_v3-multi_turn.html", + "bbox": [ + 507, + 894, + 850, + 919 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Bank (Li et al., 2023), a three-level evaluation framework comprising 73 diverse and complex API tools. It assesses an LLM's ability to select and apply tools through natural multi-turn dialogues, across three levels of difficulty. We also evaluate on a representative QA benchmark, Bamboogle (Press et al., 2022), which comprises a variety of question-answering tasks where performance is measured by final answer accuracy rather than the correctness of tool use. This broad coverage makes our evaluation setting effective for assessing real-world LLM tool use proficiency. All results are reported in terms of accuracy.", + "bbox": [ + 112, + 84, + 489, + 294 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Baselines. We compare our approach against several baselines to better isolate the effects of GRPO training: (1) Raw Instruct Model: the original model without any additional fine-tuning or RL, evaluated using the same prompts. (2) SFT on RL Data: the instruct model fine-tuned using the same 4K / selected 400 data points as the RL training set, providing a comparison point to assess whether GRPO training outperforms standard SFT. (3) GRPO on SFT Model: GRPO is applied to a model that has already undergone SFT on the selected 400 data points. This setup allows us to evaluate the impact of initializing GRPO with a format-aware model, in contrast to starting from the raw instruct model in a cold start manner. (4) PPO: We also include the standard PPO setting as a baseline to evaluate whether our reward design is effective beyond GRPO. We report results for both a cold start PPO model and a PPO model initialized with SFT, using the same hyperparameters as in the GRPO setup for a fair comparison. Please refer to Appendix B for more details and justifications.", + "bbox": [ + 112, + 302, + 489, + 657 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3 Results", + "text_level": 1, + "bbox": [ + 112, + 665, + 218, + 681 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Main Results. We report BFCL and API-Bank results in Table 1 and Table 2, respectively. Our GRPO method, trained from scratch on the Qwen2.5-Instruct series, generally outperforms other baselines, achieving ${\\sim}10\\%$ absolute gains over SFT trained on the same data volume. In contrast, LLaMA-3.2-Instruct shows less improvement, possibly due to the model's lower adaptability to GRPO-style generalization. Nevertheless, it remains competitive and outperforms most baselines on API-Bank.", + "bbox": [ + 112, + 687, + 489, + 863 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "SFT Initialization Impacts. 
Interestingly, GRPO also improves models initialized with limited SFT, often outperforming full-scale SFT", + "bbox": [ + 112, + 873, + 489, + 921 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d8dc3638bde2b89fa52a2572854cae623f195a7bdeaf5d9b387e540c7b3b470f.jpg", + "image_caption": [ + "(a) Format Reward" + ], + "image_footnote": [], + "bbox": [ + 517, + 87, + 690, + 178 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/86ab19a0b1d508548569e0402244154029f504b76c0052ae772036ac379e63d7.jpg", + "image_caption": [ + "(b) Correctness Reward" + ], + "image_footnote": [], + "bbox": [ + 702, + 87, + 875, + 178 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7ac0f46c2122b5016b913482eafa9455530e12499baa0d34caae98e0d8b3c074.jpg", + "image_caption": [ + "Figure 5: Format (left) and correctness (right) reward trends across training steps for Qwen2.5-3B-Instruct with different model initialization strategies.", + "(a) Format Reward", + "Figure 6: Format (left) and correctness (right) reward trends across training steps for Qwen2.5-3B-Instruct with different RL strategies (GRPO v.s. PPO)." + ], + "image_footnote": [], + "bbox": [ + 515, + 278, + 690, + 368 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/94ced1cd69372f6e86341575a504e8b4cc1f2bf372c3c145a6fdae3920f6dcdb.jpg", + "image_caption": [ + "(b) Correctness Reward" + ], + "image_footnote": [], + "bbox": [ + 702, + 278, + 875, + 368 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "trained on 10 times more data. However, this setup still underperforms compared to cold start GRPO. We hypothesize that SFT initialization leads to memorization and overfitting, which reduces the impact of GRPO's effectiveness in generalization. As shown in Figure 5, SFT-initialized models achieve higher training rewards due to distributional alignment between SFT and RL data, but empirically generalize worse on the two benchmarks. This further highlights that higher training rewards do not necessarily translate to better generalization.", + "bbox": [ + 505, + 474, + 884, + 667 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Reward Design on PPO. We also evaluate PPO under both cold start and SFT-initialized settings to examine the effectiveness of our reward design. The results show that while PPO with a cold start can outperform SFT in some cases, it tends to be less stable across different model settings. In contrast, GRPO consistently achieves higher rewards even from a cold start, suggesting that our reward design is partially effective for PPO but works best in the GRPO framework. As shown in Figure 6, GRPO not only achieves higher correctness rewards but also gains format rewards more rapidly during training. Interestingly, PPO benefits from SFT initialization, generally yielding better results than a cold start, whereas GRPO performs better", + "bbox": [ + 507, + 678, + 884, + 921 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/a3a33f9935b2d034f4785ca3ce0edc9bfb4b0cce014fa130937d9a32138659a6.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelOverall AccNon-Live AST AccNon-Live Exec AccLive AccMulti Turn AccRelevance DetectionIrrelevance Detection
Qwen2.5-1.5B-Instruct (Raw)19.41%16.00%13.18%35.58%0.00%44.44%82.49%
Qwen2.5-1.5B-Instruct (SFT400)40.21%65.12%61.11%56.69%1.00%94.44%60.14%
Qwen2.5-1.5B-Instruct (SFT4k)40.67%59.94%59.84%59.31%1.00%88.89%71.34%
Qwen2.5-1.5B-Instruct (SFT400+PPO)42.95%77.65%69.75%55.73%1.88%100.00%48.40%
Qwen2.5-1.5B-Instruct (SFT400+GRPO)40.93%70.54%60.79%56.33%1.00%94.44%58.63%
Qwen2.5-1.5B-Instruct (PPO Cold Start)38.32%79.40%70.11%45.24%0.87%100.00%18.09%
Qwen2.5-1.5B-Instruct (Ours, GRPO Cold Start)46.20%77.96%76.98%60.73%2.25%100.00%56.44%
Qwen2.5-3B-Instruct (Raw)33.04%42.52%40.80%53.96%1.00%64.71%56.01%
Qwen2.5-3B-Instruct (SFT400)34.08%69.29%61.50%41.40%0.00%94.44%8.11%
Qwen2.5-3B-Instruct (SFT4k)41.97%62.85%54.73%59.17%0.75%77.78%75.12%
Qwen2.5-3B-Instruct (SFT400+PPO)45.80%78.29%71.09%58.76%5.12%94.12%54.70%
Qwen2.5-3B-Instruct (SFT400+GRPO)46.42%76.21%68.93%64.15%1.75%88.89%71.76%
Qwen2.5-3B-Instruct (PPO Cold Start)51.15%82.42%78.52%67.78%4.88%94.12%73.87%
Qwen2.5-3B-Instruct (Ours, GRPO Cold Start)52.98%81.58%79.43%73.78%3.75%88.24%84.85%
Qwen2.5-7B-Instruct (Raw)41.97%66.02%70.11%53.51%4.25%76.47%62.66%
Qwen2.5-7B-Instruct (SFT400)34.08%69.29%66.68%41.4%0.00%94.44%8.11%
Qwen2.5-7B-Instruct (SFT4k)36.53%45.15%53.5%57.13%0.75%72.22%72.32%
Qwen2.5-7B-Instruct (SFT400+PPO)42.02%83.90%72.62%51.84%0.25%100.00%29.66%
Qwen2.5-7B-Instruct (SFT400+GRPO)39.25%80.69%74.34%46.51%0.25%100.00%14.19%
Qwen2.5-7B-Instruct (PPO Cold Start)46.68%79.33%78.16%63.17%0.38%88.89%52.92%
Qwen2.5-7B-Instruct (Ours, GRPO Cold Start)58.38%86.17%78.25%74.9%18.12%83.33%76.68%
Llama-3.2-3B-Instruct (Raw)22.09%17.44%14.57%43.85%0.00%77.78%66.07%
Llama-3.2-3B-Instruct (SFT400)41.22%64.27%62.18%58.37%0.75%66.67%71.12%
Llama-3.2-3B-Instruct (SFT4k)44.16%65.42%67.02%63.04%1.38%77.78%78.25%
Llama-3.2-3B-Instruct (SFT400+PPO)41.62%68.10%69.88%52.98%3.00%94.12%56.29%
Llama-3.2-3B-Instruct (SFT400+GRPO)42.54%65.15%68.98%59.40%0.88%72.22%65.80%
Llama-3.2-3B-Instruct (PPO Cold Start)42.98%84.00%72.00%52.80%2.88%100.00%31.94%
Llama-3.2-3B-Instruct (Ours, GRPO Cold Start)44.10%74.38%75.18%56.86%1.37%94.44%62.23%
", + "bbox": [ + 119, + 80, + 878, + 354 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/e4d175360576c3fb81a40f514fde4894f0c62e6bb1915ba240566fc0a964c1a5.jpg", + "table_caption": [ + "Table 1: BFCL V3 Benchmark Results (Main Result)" + ], + "table_footnote": [], + "table_body": "
ModelOverall AccLevel 1Level 2Level 3
Qwen2.5-1.5B-Instruct (Raw)30.65%28.32%35.82%35.11%
Qwen2.5-1.5B-Instruct (SFT400)53.60%57.14%50.75%44.27%
Qwen2.5-1.5B-Instruct (SFT4k)47.07%52.88%52.24%26.72%
Qwen2.5-1.5B-Instruct (SFT400+PPO)57.12%60.9%50.75%48.85%
Qwen2.5-1.5B-Instruct (SFT400+GRPO)61.31%64.16%58.21%54.20%
Qwen2.5-1.5B-Instruct (PPO Cold Start)40.54%44.61%31.34%32.82%
Qwen2.5-1.5B-Instruct (Ours, GRPO Cold Start)63.15%70.68%61.19%41.22%
Qwen2.5-3B-Instruct (Raw)51.59%59.65%32.84%36.64%
Qwen2.5-3B-Instruct (SFT400)52.76%59.65%50.75%32.82%
Qwen2.5-3B-Instruct (SFT4k)50.92%55.64%43.28%40.46%
Qwen2.5-3B-Instruct (SFT400+PPO)65.16%67.92%55.22%61.83%
Qwen2.5-3B-Instruct (SFT400+GRPO)62.48%68.67%58.21%45.80%
Qwen2.5-3B-Instruct (PPO Cold Start)57.62%64.66%59.70%35.11%
Qwen2.5-3B-Instruct (Ours, GRPO Cold Start)67.00%73.43%67.16%47.33%
Qwen2.5-7B-Instruct (Raw)62.48%70.68%49.25%44.27%
Qwen2.5-7B-Instruct (SFT400)50.59%55.89%50.75%34.35%
Qwen2.5-7B-Instruct (SFT4k)47.07%51.13%34.33%41.22%
Qwen2.5-7B-Instruct (SFT400+PPO)63.15%72.43%58.21%37.40%
Qwen2.5-7B-Instruct (SFT400+GRPO)54.10%61.40%52.24%32.82%
Qwen2.5-7B-Instruct (PPO Cold Start)61.64%68.67%44.78%48.85%
Qwen2.5-7B-Instruct (Ours, GRPO Cold Start)64.66%73.93%61.19%38.17%
Llama-3.2-3B-Instruct (Raw)40.54%44.86%29.85%32.82%
Llama-3.2-3B-Instruct (SFT400)52.76%60.65%35.82%37.40%
Llama-3.2-3B-Instruct (SFT4k)43.89%53.88%29.85%20.61%
Llama-3.2-3B-Instruct (SFT400+PPO)57.79%63.16%47.76%46.56%
Llama-3.2-3B-Instruct (SFT400+GRPO)56.78%63.60%41.79%43.51%
Llama-3.2-3B-Instruct (PPO Cold Start)55.78%60.65%41.79%48.09%
Llama-3.2-3B-Instruct (Ours, GRPO Cold Start)59.13%65.66%52.24%42.75%
", + "bbox": [ + 119, + 391, + 505, + 653 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/2151efeafc6930f4d8fc699da769a1f209b9d453a3e8ec9bf3cdf28ea194ce90.jpg", + "table_caption": [ + "Table 2: API-Bank Test Results (Main Result)" + ], + "table_footnote": [], + "table_body": "
ModelAccuracyAvg Num Tool Call
Qwen2.5-1.5B-Instruct (Raw)20.8%0.61
Qwen2.5-1.5B-Instruct (SFT400)24.8%0.78
Qwen2.5-1.5B-Instruct (SFT4k)23.2%1.25
Qwen2.5-1.5B-Instruct (SFT400+PPO)36.8%1.06
Qwen2.5-1.5B-Instruct (SFT400+GRPO)38.4%0.96
Qwen2.5-1.5B-Instruct (PPO Cold Start)23.2%2.38
Qwen2.5-1.5B-Instruct (Ours, GRPO Cold Start)44.0%1.19
Qwen2.5-3B-Instruct (Raw)52.0%1.77
Qwen2.5-3B-Instruct (SFT400)54.4%0.86
Qwen2.5-3B-Instruct (SFT4k)49.6%0.92
Qwen2.5-3B-Instruct (SFT400+PPO)43.2%1.04
Qwen2.5-3B-Instruct (SFT400+GRPO)56.8%0.99
Qwen2.5-3B-Instruct (PPO Cold Start)40.0%1.14
Qwen2.5-3B-Instruct (Ours, GRPO Cold Start)60.0%1.32
Qwen2.5-7B-Instruct (Raw)69.6%1.42
Qwen2.5-7B-Instruct (SFT400)28.8%3.71
Qwen2.5-7B-Instruct (SFT4k)30.4%1.06
Qwen2.5-7B-Instruct (SFT400+PPO)45.6%3.54
Qwen2.5-7B-Instruct (SFT400+GRPO)29.6%3.70
Qwen2.5-7B-Instruct (PPO Cold Start)48.0%1.25
Qwen2.5-7B-Instruct (Ours, GRPO Cold Start)72.0%1.63
Llama-3.2-3B-Instruct (Raw)34.4%1.25
Llama-3.2-3B-Instruct (SFT400)44.0%0.98
Llama-3.2-3B-Instruct (SFT4k)48.8%0.98
Llama-3.2-3B-Instruct (SFT400+PPO)39.2%1.33
Llama-3.2-3B-Instruct (SFT400+GRPO)45.6%1.00
Llama-3.2-3B-Instruct (PPO Cold Start)29.6%1.42
Llama-3.2-3B-Instruct (Ours, GRPO Cold Start)52.0%0.89
", + "bbox": [ + 534, + 391, + 878, + 655 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 3: Bamboogle Test Results (Main Result)", + "bbox": [ + 544, + 665, + 868, + 678 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "when trained from scratch. These findings highlight that while PPO can benefit from our reward design, its impact is more limited compared to the more robust and consistent improvements observed with GRPO.", + "bbox": [ + 112, + 705, + 489, + 784 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Generalization Studies. We evaluate the generalization ability of our trained model in two challenging settings: unfamiliar scenarios and novel task goals (both from BFCL benchmark subset). Specifically, we test the model's performance in tool usage within unseen programming languages and its ability to detect irrelevant tools, neither of", + "bbox": [ + 112, + 808, + 490, + 921 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/edc4a0db668429fb699878570feac579cd2731eaea04e93c2eba09dd09e6b856.jpg", + "image_caption": [ + "(a) Unfamiliar Scenario" + ], + "image_footnote": [], + "bbox": [ + 515, + 720, + 690, + 812 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/9da49ecd03180b3351e89de045a7633df7596f714deea3d3adb864ec3d0ad88d.jpg", + "image_caption": [ + "(b) Unfamiliar Goal", + "Figure 7: Qwen2.5-3B-Instruct's performance across unfamiliar programming language scenarios (left) and novel relevance detection task goals (right), evaluated under different training settings." + ], + "image_footnote": [], + "bbox": [ + 700, + 721, + 875, + 812 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "User: I would like to buy a movie ticket in San Jose at 11 o'clock in the night. Tools Available:", + "bbox": [ + 127, + 86, + 447, + 103 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Movies_1_BuyMovieTickets/movie_name, number_ofTickets, ...)", + "$\\rightarrow$ Buy tickets for a movie at a specific time and location.", + "2. Movies 1 FindMovies location.", + "$\\rightarrow$ Search movies by location, genre, or format.", + "3. Movies_1_GetTimesForMovie/movie_name, location, ...)", + "$\\rightarrow$ Get showtimes for a movie on a specific date and location." + ], + "bbox": [ + 129, + 104, + 448, + 153 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": " The user wants to buy a movie ticket in San Jose at 11 PM. The available", + "bbox": [ + 127, + 159, + 460, + 167 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "tool, Movies_1_BuyMovieTickets, allows us to purchase movie tickets with the required details such as movie name, number of tickets, show date, location, show time, and show type. We need to find the movie name and the show date. However, the user did not specify the movie name or the show date. We can infer the show date from the current date, but we need to know the movie name to proceed with purchase. ", + "bbox": [ + 129, + 168, + 485, + 210 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": " To assist in purchasing a movie ticket, I need the name of the movie and", + "bbox": [ + 129, + 218, + 473, + 225 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "the specific date of the show. Could you please provide this information? 
", + "bbox": [ + 129, + 227, + 478, + 235 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "User: What is the slope of the line which is perpendicular to the line with the", + "bbox": [ + 129, + 249, + 442, + 256 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "equation $y = 3x + 2$ ?", + "bbox": [ + 129, + 258, + 216, + 266 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Tools Available:", + "bbox": [ + 129, + 266, + 196, + 274 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. findcritical_points(function, variable, range=[0.0, 3.4])", + "$\\rightarrow$ Find critical points of a function with respect to a variable, optionally within a range." + ], + "bbox": [ + 129, + 275, + 438, + 300 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": " The problem is asking for the slope of a line that is perpendicular to the line with the equation $y = 3x + 2$ . The slope of the given line is 3. The slope of a line that is perpendicular to another line is the negative reciprocal of the original line's slope. So, the slope of the line perpendicular to $y = 3x + 2$ would be -1/3. The given tool find_critical_points is not relevant to finding the slope of a line and cannot be used to solve this problem.", + "bbox": [ + 127, + 306, + 487, + 356 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": " The slope of the line that is perpendicular to the line with the", + "bbox": [ + 129, + 363, + 426, + 372 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "equation $y = 3x + 2$ is -1/3. ", + "bbox": [ + 129, + 372, + 294, + 381 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Table 4: Cases of Qwen2.5-3B-Instruct (GRPO Cold Start) generalizing to irrelevant tool detection goals.", + "text_level": 1, + "bbox": [ + 112, + 395, + 485, + 425 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "which were explicitly included during RL training or in the dataset. As shown in Figure 7, Qwen2.5-3B-Instruct, when trained from scratch with our GRPO-based reward design, consistently achieves highest performance. Additionally, Section 4.3 presents two qualitative examples where the model proactively rejects inappropriate tool use—first by clarifying ambiguous intent, and second by opting to answer directly without tools. These behaviors reflect emergent proactivity and metacognition, enhancing efficiency, reducing hallucinations, and signaling foundational agentic intelligence.", + "bbox": [ + 112, + 454, + 487, + 646 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Free-form Inference Effectiveness. While our model is trained with a focus on tool call format and correctness, we further evaluate its ability to handle free-form tool use in a QA setting. Unlike the structured tool selection and application tasks, QA setting: (1) imposes no constraints on tool call parameters, and (2) evaluates only the final answer, making it a \"goal-oriented\" rather than a \"process-oriented\" task. This naturally introduces a multi-step interaction scenario.", + "bbox": [ + 112, + 661, + 487, + 821 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Specifically, we use Bamboogle, a multi-hop QA dataset, to assess this capability. The model is equipped with a web search tool, and we report both the answer accuracy and the number of tool calls for all baselines and our approach. 
As shown in Table 3, our reward design achieves the highest", + "bbox": [ + 112, + 824, + 487, + 921 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/3f289fa8a90fbf8fbf687744f30d2bb325c3c09d051f0b7147fec1d6f9461d2b.jpg", + "image_caption": [ + "(a) Response Length" + ], + "image_footnote": [], + "bbox": [ + 517, + 89, + 690, + 178 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/aa1c9884a4981a22763d4b52e5c17a6c9e5dc5abe1c1948ebb70001bf6540d0a.jpg", + "image_caption": [ + "(b) Length Reward", + "Figure 8: Response length (left) and its reward (right) trends across training steps for different models." + ], + "image_footnote": [], + "bbox": [ + 700, + 87, + 875, + 178 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "performance, despite this setting not being explicitly seen during training. Notably, our cold start GRPO model surpasses others in accuracy without relying on an excessive number of tool calls. This suggests that the model can flexibly invoke tools when needed, effectively leverage feedback, and navigate efficiently toward the correct answer.", + "bbox": [ + 507, + 268, + 882, + 380 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5 Analysis", + "text_level": 1, + "bbox": [ + 507, + 392, + 618, + 407 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In this section, we conduct a series of ablation studies to identify the most effective reward design for tool calling. We explore various factors including reward type, scale, granularity, and temporal dynamics.", + "bbox": [ + 507, + 417, + 882, + 497 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5.1 Effect of Length Reward", + "text_level": 1, + "bbox": [ + 507, + 508, + 749, + 524 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We first examine the role of a length-based reward. Prior work has demonstrated that R1-like models can promote deeper reasoning, often reflected in longer thinking traces. To encourage this behavior, we introduce a reward term proportional to the length of the <think> field:", + "bbox": [ + 507, + 529, + 882, + 625 + ], + "page_idx": 9 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{R}_{\\text{length}} = \\min\\left( \\frac{L_{\\text{think}}}{L_{\\text{target}}},\\ 1 \\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 591, + 634, + 794, + 670 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "where $L_{\\text{think}}$ denotes the length of the thinking segment in the model's output, and $L_{\\text{target}}$ denotes the target output length, which we empirically set to 512. We found that the raw model rarely generates responses longer than half this length, making 512 a reasonable and effective target for encouraging longer outputs. This length-based component is added to the overall reward, which now consists of format, correctness, and reasoning length.", + "bbox": [ + 507, + 678, + 882, + 824 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "As shown in Figure 8, both response length and the length reward generally increase throughout training, particularly for the Qwen model series. This indicates that the length reward effectively encourages longer reasoning. 
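For a concrete reading of the formula: with $L_{\\text{target}} = 512$, a <think> segment half the target length earns $\\mathcal{R}_{\\text{length}} = 0.5$, and any segment at or beyond the target saturates at the maximum of $1$. 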
However, the downstream results in Table 5 reveal that adding a length", + "bbox": [ + 507, + 825, + 882, + 921 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/cd6dcd5e59416e9771d2031ef78c027d93b9d9dfba2d8b2e158e459218092ead.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelOverall AccNon-Live AST AccNon-Live Exec AccLive AccMulti Turn AccRelevance DetectionIrrelevance Detection
Qwen2.5-1.5B-Instruct (Original)46.20%77.96%76.98%60.73%2.25%100.00%56.44%
Qwen2.5-1.5B-Instruct (w/ Length Reward)33.23%70.58%71.36%35.63%0.50%94.44%4.52%
Qwen2.5-1.5B-Instruct (Dynamic)28.51%53.23%48.23%38.07%0.00%55.56%25.08%
Qwen2.5-3B-Instruct (Original)52.98%81.58%79.43%73.78%3.75%88.24%84.85%
Qwen2.5-3B-Instruct (w/ Length reward)48.89%77.83%78.61%63.56%4.50%88.24%71.22%
Qwen2.5-3B-Instruct (Dynamic)48.24%77.60%79.11%63.22%3.00%88.89%68.53%
Llama-3.2-3B-Instruct (Original)44.10%74.38%75.18%56.86%1.37%94.44%62.23%
Llama-3.2-3B-Instruct (w/ Length reward)44.98%78.02%77.54%56.55%1.25%100.00%63.76%
Llama-3.2-3B-Instruct (Dynamic)43.15%75.50%71.64%56.06%1.00%100.00%57.82%
", + "bbox": [ + 119, + 80, + 880, + 190 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Table 5: BFCL V3 Benchmark Results (Length)", + "bbox": [ + 332, + 198, + 662, + 212 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/e489e46a097675008b0b2b8264d88eeba35e4558c034f26f0ab7174772e2d697.jpg", + "image_caption": [ + "(a) Response Length" + ], + "image_footnote": [], + "bbox": [ + 121, + 242, + 295, + 335 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/d4f5c580a080fa660a72ece01f1f516aab789459b4ebb589abcec9b899671c97.jpg", + "image_caption": [ + "(b) Length Reward", + "Figure 9: Response length (left) and its reward (right) trends across training steps within the dynamic length reward training setting." + ], + "image_footnote": [], + "bbox": [ + 305, + 242, + 480, + 332 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "reward does not consistently improve task performance, and in smaller-scale models, it can even cause substantial degradation. These observations suggest that while extended reasoning may appear desirable, it is not always beneficial for tool use tasks. In fact, excessive length may introduce unnecessary complexity, leading to overthinking and reduced effectiveness.", + "bbox": [ + 112, + 437, + 487, + 565 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Dynamic Length Reward. Since fixed-length rewards showed minimal impact and converged quickly, we explored a dynamic length reward that adapts over training steps. Specifically, we define:", + "bbox": [ + 112, + 575, + 487, + 640 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {R} _ {\\mathrm {d y n a m i c}} = \\min \\left(\\frac {L _ {\\mathrm {t h i n k}}}{L _ {\\mathrm {t a r g e t}} \\cdot (1 + p)}, 1\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 156, + 650, + 442, + 686 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "where $S$ denotes the training steps and $p = \\frac{S_{\\mathrm{current}}}{S_{\\mathrm{total}}} \\in [0,1]$ represents the normalized training progress. This formulation gradually increases the target thinking length over time, aligning with model maturity.", + "bbox": [ + 112, + 695, + 485, + 775 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "As shown in fig. 9, this approach yields a steadier growth in thinking length, particularly for the Llama model. However, the performance results in Table 5 reveal that even scheduled rewards fail to improve performance. This further supports our hypothesis that extended reasoning may not benefit this task and can even have adverse effects.", + "bbox": [ + 112, + 776, + 487, + 888 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Takeaway 1: While length rewards encourage longer reasoning traces, they do not consistently improve task performance and may even harm it in smaller models, highlighting that longer reasoning is not inherently better for tool use tasks.", + "bbox": [ + 517, + 242, + 867, + 340 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "5.2 Effect of Reward Scale", + "text_level": 1, + "bbox": [ + 507, + 372, + 734, + 387 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Next, we investigate the effect of reward scaling, specifically the relative weighting between correctness and format rewards. 
Prior work in R1-style RL commonly assigns a higher weight to correctness reward than to format reward (Xie et al., 2025; Jin et al., 2025), emphasizing the importance of learning the correct answer over superficial adherence to format. This strategy helps prevent reward hacking, where a model might exploit formatting heuristics without learning task semantics.", + "bbox": [ + 507, + 393, + 882, + 552 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "To test the importance of this design choice, we conduct an ablation where we equalize the maximum correctness and format rewards by setting the former's range to $[-1, 1]$, matching that of the format reward. This adjustment only affects the final normalization step of the correctness reward:", + "bbox": [ + 507, + 555, + 882, + 650 + ], + "page_idx": 10 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal{R}_{\\mathrm{correct}} = 2 \\cdot \\frac{R_{\\mathrm{max}}}{S_{\\mathrm{max}}} - 1 \\in [-1, 1]\n$$\n", + "text_format": "latex", + "bbox": [ + 566, + 659, + 823, + 694 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "where all variables are defined as in Section 3.3.", + "bbox": [ + 507, + 703, + 867, + 717 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "As shown in Table 6, this equal-scaling variant, denoted as \"Equal Max\", results in a slight drop in overall accuracy across most models, with the exception of Qwen2.5-3B, which maintains performance comparable to the original setting. These results underscore the importance of assigning greater weight to correctness reward: doing so helps steer the model toward mastering the core reasoning and tool use capabilities necessary for robust generalization.", + "bbox": [ + 507, + 719, + 882, + 879 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Dynamic Reward Scaling. Building on the insight that correctness reward plays a more critical", + "bbox": [ + 507, + 889, + 882, + 921 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/546a4bc485365caf449427b56cbaac3273cab3509f2a8e83c4907d6a20f983a1.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelOverall AccNon-Live AST AccNon-Live Exec AccLive AccMulti Turn AccRelevance DetectionIrrelevance Detection
Qwen2.5-1.5B-Instruct (Original)46.20%77.96%76.98%60.73%2.25%100.00%56.44%
Qwen2.5-1.5B-Instruct (Equal max)39.47%78.56%75.50%45.45%2.50%100.00%16.44%
Qwen2.5-1.5B-Instruct (Two stage)38.85%77.96%76.23%44.51%2.25%100.00%10.61%
Qwen2.5-1.5B-Instruct (Dynamic)45.71%78.31%75.73%58.91%2.50%100.00%57.20%
Qwen2.5-3B-Instruct (Original)52.98%81.58%79.43%73.78%3.75%88.24%84.85%
Qwen2.5-3B-Instruct (Equal max)51.76%81.50%79.50%69.79%4.25%88.89%78.07%
Qwen2.5-3B-Instruct (Two stage)50.66%80.62%78.82%67.93%3.50%88.89%76.42%
Qwen2.5-3B-Instruct (Dynamic)53.81%81.44%80.75%75.43%3.62%77.78%88.82%
Llama-3.2-3B-Instruct (Original)44.10%74.38%75.18%56.86%1.37%94.44%62.23%
Llama-3.2-3B-Instruct (Equal max)42.47%67.77%75.05%55.75%1.00%88.89%59.56%
Llama-3.2-3B-Instruct (Two stage)41.33%65.54%72.70%55.22%0.75%88.89%57.59%
Llama-3.2-3B-Instruct (Dynamic)46.85%83.00%72.77%61.00%3.38%88.89%59.37%
", + "bbox": [ + 119, + 82, + 880, + 223 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Table 6: BFCL V3 Benchmark Results (Scale)", + "bbox": [ + 337, + 231, + 657, + 244 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "role, we are further motivated by the intuition that different reward components may benefit from being emphasized at different stages of training. This leads us to explore dynamically adjusting reward scales in accordance with training progress. Specifically, we hypothesize that in early training, the model should prioritize learning the correct output format, which entails an easier objective, before gradually shifting focus to the more challenging goal of tool use correctness. To test this hypothesis, we design two dynamic reward scaling strategies:", + "bbox": [ + 112, + 272, + 487, + 449 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "- Two stage (Coarse) Setting: We divide training into two phases. In the first $s$ training steps, we downscale the correctness reward to $\\frac{1}{3}$ of its original scale while keeping the format reward at its original scale. After step $s$ , we restore the correctness reward to its original scale and simultaneously reduce the format reward to range $[0, 0.5]$ ( $\\frac{1}{2}$ of its original scale). Formally the reward scales are:", + "bbox": [ + 115, + 451, + 489, + 596 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {S c a l e} _ {\\text {f o r m a t}} = \\left\\{ \\begin{array}{l l} [ 0, 1 ] & \\text {i f S _ {\\text {c u r r e n t}} < s} \\\\ [ 0, 0. 5 ] & \\text {o t h e r w i s e} \\end{array} , \\right.\n$$\n", + "text_format": "latex", + "bbox": [ + 157, + 605, + 455, + 646 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {S c a l e} _ {\\text {c o r r e c t}} = \\left\\{ \\begin{array}{l l} [ - 1, 1 ] & \\text {i f S _ {\\text {c u r r e n t}} < s} \\\\ [ - 3, 3 ] & \\text {o t h e r w i s e} \\end{array} \\right.\n$$\n", + "text_format": "latex", + "bbox": [ + 161, + 658, + 453, + 700 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "where $S_{\\mathrm{current}}$ denotes the current training step. In our experiments, we empirically set the switching point to $s = 30$ steps, as we observed that the format reward typically experiences a significant increase within the first 30 steps. Therefore, it is more beneficial for later steps to shift focus toward optimizing correctness.", + "bbox": [ + 127, + 705, + 489, + 816 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "- Dynamic (Finegrained) Setting: We apply continuous interpolation between the two reward scales throughout training. Initially, both the format and correctness reward scales are set equally. Over time, the format reward scale linearly decays to its original value, while the correctness", + "bbox": [ + 115, + 824, + 489, + 921 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/c4a42ce99210f1c629662d6104f163b5fa2dbc2a5dfddaa2923d8c1986ba915a.jpg", + "image_caption": [ + "(a) Format Reward" + ], + "image_footnote": [], + "bbox": [ + 515, + 274, + 690, + 367 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/07889187074a1d5140bc409ad1bd803fcc49ff172e176507ffd4fb8fa1a0b325.jpg", + "image_caption": [ + "(b) Correctness Reward", + "Figure 10: Format (left) and correctness (right) reward trends across training steps for Qwen2.5-3B-Instruct with different reward scale dynamics." 
+ ], + "image_footnote": [], + "bbox": [ + 700, + 274, + 875, + 366 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "reward scale gradually increases to its original value, allowing the training to shift focus from format adherence to task correctness accordingly. Formally, the dynamic scaling is then defined as:", + "bbox": [ + 524, + 469, + 882, + 533 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm{Scale}_{\\text{format}} = [-2 + p,\\ 2 - p],\n$$\n", + "text_format": "latex", + "bbox": [ + 589, + 546, + 815, + 564 + ], + "page_idx": 11 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm{Scale}_{\\text{correct}} = [-2 - p,\\ 2 + p]\n$$\n", + "text_format": "latex", + "bbox": [ + 591, + 575, + 813, + 593 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "where $p \\in [0,1]$ similarly represents the normalized training progress. This design ensures a smooth shift of learning focus from format fidelity to correctness.", + "bbox": [ + 522, + 599, + 884, + 663 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We present the reward dynamics of the original and two dynamic scaling strategies in Figure 10. As shown in Table 6, the Two stage (Coarse) reward setting unexpectedly leads to a drop in performance, whereas the Dynamic (Finegrained) scaling can improve the model's benchmark performance. These findings suggest that abrupt shifts in reward scale may negatively impact the training dynamics. In contrast, a smoother and gradual transition from simpler objectives to more nuanced ones appears to better support the model's learning trajectory and generalization during GRPO training.", + "bbox": [ + 507, + 668, + 882, + 860 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/ff3197de12e4e5c6e186807989a91719f5b1f0d72bc4490312c2ad49c44e81f4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 124, + 87, + 149, + 105 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Takeaway 2: Gradually adjusting reward scales during training, rather than abrupt changes, better supports model learning and generalization, highlighting the benefits of a smoother transition from simpler objectives to more complex ones.", + "bbox": [ + 152, + 89, + 467, + 184 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "5.3 Effect of Reward Granularity", + "text_level": 1, + "bbox": [ + 112, + 217, + 391, + 233 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We now perform a detailed analysis of the effect of reward granularity, focusing specifically on the correctness reward. Tool calling, by nature, poses challenges for reward assignment, as it involves multiple facets beyond a single definitive answer (e.g., in contrast to math reasoning tasks). Our original reward design decomposes correctness into matching the tool name, parameter names, and parameter values, offering a finegrained, \"process-oriented\" signal that reflects partial correctness in tool usage.", + "bbox": [ + 112, + 237, + 485, + 413 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To assess the impact of this granularity, we evaluate three alternative reward formulations with progressively coarser levels of aggregation:", + "bbox": [ + 112, + 414, + 489, + 463 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "- Finegrained: We apply strict exact-match constraints to both tool name and parameter name matching. 
Specifically, we define:", + "bbox": [ + 115, + 466, + 489, + 514 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nr_{\\text{name}} = \\mathbb{1}\\left[ N_{G} = N_{P} \\right] \\in \\{0, 1\\}\n$$\n", + "text_format": "latex", + "bbox": [ + 210, + 527, + 405, + 542 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nr_{\\text{param}} = \\sum_{G_{j} \\in G} \\mathbb{1}\\left[ \\mathrm{keys}(G_{j}) = \\mathrm{keys}(P_{j}) \\right] \\in [0, |G|]\n$$\n", + "text_format": "latex", + "bbox": [ + 149, + 546, + 465, + 576 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "- Intermediate: We combine the parameter name and value rewards into a single term that enforces an exact match on the entire parameter dictionary. Formally:", + "bbox": [ + 115, + 587, + 487, + 652 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nr_{\\text{param}} + r_{\\text{value}} = \\sum_{G_{j} \\in G} \\mathbb{1}\\left[ \\mathrm{params}(G_{j}) = \\mathrm{params}(P_{j}) \\right] \\in [0, |G|]\n$$\n", + "text_format": "latex", + "bbox": [ + 164, + 663, + 450, + 692 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "- Coarse: At the coarsest level, we fully entangle tool name, parameter names, and parameter values, treating the entire tool set as a unit. Reward is given only if the generated tool set exactly matches the ground truth:", + "bbox": [ + 115, + 703, + 487, + 783 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\nr_{\\text{name}} + r_{\\text{param}} + r_{\\text{value}} = \\mathbb{1}[G = P] \\in \\{0, 1\\}\n$$\n", + "text_format": "latex", + "bbox": [ + 169, + 797, + 445, + 812 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "All other aspects of reward computation are kept identical to those described in Section 3.3. Starting from our original design, which is the most finegrained, we progressively entangle reward components to derive increasingly coarse-grained alternatives.", + "bbox": [ + 112, + 824, + 489, + 919 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/552649707e12fd059580188a20cb855e21ec6e4f969add3bd77cecc592a62b4e.jpg", + "image_caption": [ + "Figure 11: Correctness reward trends across training steps for Qwen2.5-3B-Instruct with different reward granularity." + ], + "image_footnote": [], + "bbox": [ + 512, + 82, + 878, + 178 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The reward dynamics across training steps, shown in Figure 11, demonstrate that as reward granularity becomes coarser, it becomes harder for the model to achieve higher reward values during RL training. This suggests that overly strict and entangled rewards may lead to sparse learning signals, potentially hindering effective credit assignment.", + "bbox": [ + 507, + 256, + 882, + 369 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Empirical results in Table 7 further support this insight: our original, most finegrained reward strategy performs well across models. 
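Intuitively (a hypothetical contrast for illustration): under the decomposed formulations, a response that selects the correct tool but misses one parameter value still collects the tool-name and parameter-key rewards, whereas under the coarse formulation the same response receives nothing, since reward is granted only for an exact match of the entire tool set. 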
In general, finer-grained reward decomposition leads to better training outcomes and higher final task performance, indicating its advantage in promoting more stable and effective policy learning.", + "bbox": [ + 507, + 370, + 882, + 482 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/1454ca022118595022cec19aa91d5512614820037c89daf0cd26bb64d0b07139.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 517, + 493, + 544, + 511 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Takeaway 3: Finegrained reward decomposition provides richer learning signals, highlighting its role in enabling more effective training compared to coarse reward formulations, which can impede progress and degrade final performance.", + "bbox": [ + 546, + 494, + 865, + 590 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 507, + 623, + 640, + 637 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "In this paper, we present a reward design tailored for GRPO training on tool use tasks. Empirically, our model trained from scratch using GRPO consistently outperforms both SFT-based and SFT-initialized RL baselines, as well as models trained with alternative RL algorithms, across a variety of held-out tool use benchmarks. Furthermore, we demonstrate that our model generalizes well to QA settings, exhibiting robust multi-turn interactions, emergent proactiveness, and metacognitive behaviors, all of which are key traits for efficient and adaptable tool use, lying at the core of foundational agent capabilities. Our in-depth analysis of reward types, scaling strategies, granularity, and temporal dynamics provides further insights into how reward shaping influences learning and behavior. We hope these findings serve as a roadmap for future work", + "bbox": [ + 507, + 646, + 882, + 921 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/c0decb62c717c5b9e2ed93474a6147510ea0da8e44b962ada76b0687d177fa8e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelOverall AccNon-Live AST AccNon-Live Exec AccLive AccMulti Turn AccRelevance DetectionIrrelevance Detection
Qwen2.5-1.5B-Instruct (Original)46.20%77.96%76.98%60.73%2.25%100.00%56.44%
Qwen2.5-1.5B-Instruct (Finegrained)40.71%78.00%75.55%48.91%2.00%100.00%24.84%
Qwen2.5-1.5B-Instruct (Intermediate)37.65%77.94%72.46%43.00%1.62%100.00%12.45%
Qwen2.5-1.5B-Instruct (Coarse)36.72%76.44%70.86%41.27%2.12%100.00%12.24%
Qwen2.5-3B-Instruct (Original)52.98%81.58%79.43%73.78%3.75%88.24%84.85%
Qwen2.5-3B-Instruct (Finegrained)52.06%81.65%79.64%69.21%5.50%83.33%78.14%
Qwen2.5-3B-Instruct (Intermediate)51.36%81.15%80.07%68.64%4.25%88.89%75.74%
Qwen2.5-3B-Instruct (Coarse)51.40%79.48%78.54%68.73%5.62%88.89%77.80%
Llama-3.2-3B-Instruct (Original)44.10%74.38%75.18%56.86%1.37%94.44%62.23%
Llama-3.2-3B-Instruct (Finegrained)39.82%64.71%70.68%52.20%0.25%100.00%56.68%
Llama-3.2-3B-Instruct (Intermediate)38.62%59.83%71.86%50.56%0.25%94.44%55.68%
Llama-3.2-3B-Instruct (Coarse)35.95%52.00%61.43%48.96%1.12%83.33%61.92%
", + "bbox": [ + 119, + 80, + 878, + 219 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 7: BFCL V3 Benchmark Results (Granularity)", + "bbox": [ + 317, + 230, + 677, + 244 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "in applying reinforcement learning to tool use. Ultimately, we envision that reward is all tool learning needs, and that RL offers a powerful path toward generalizable and creative agent behavior.", + "bbox": [ + 112, + 269, + 489, + 335 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 114, + 363, + 213, + 378 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Emre Can Acikgoz, Jeremiah Greer, Akul Datta, Ze Yang, William Zeng, Oussama Elachqar, Emmanuel Koukoumidis, Dilek Hakkani-Tur, and Gokhan Tur. 2025. Can a single model master both multi-turn conversations and tool use? coalm: A unified conversational agentic language model. Preprint, arXiv:2502.08820.", + "bbox": [ + 115, + 388, + 489, + 479 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Jinheon Baek, Sujay Kumar Jauhar, Silviu Cucerzan, and Sung Ju Hwang. 2024. Researchagent: Iterative research idea generation over scientific literature with large language models. arXiv preprint arXiv:2404.07738.", + "bbox": [ + 114, + 492, + 489, + 557 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Baian Chen, Chang Shu, Ehsan Shareghi, Nigel Collier, Karthik Narasimhan, and Shunyu Yao. 2023a. Fireact: Toward language agent fine-tuning. arXiv preprint arXiv:2310.05915.", + "bbox": [ + 114, + 569, + 489, + 623 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Nuo Chen, Hongguang Li, Baoyuan Wang, and Jia Li. 2023b. From good to great: Improving math reasoning with tool-augmented interleaf prompting. arXiv preprint arXiv:2401.05384.", + "bbox": [ + 114, + 634, + 489, + 687 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Wenhu Chen, Xueguang Ma, Xinyi Wang, and William W Cohen. 2022. Program of thoughts prompting: Disentangling computation from reasoning for numerical reasoning tasks. arXiv preprint arXiv:2211.12588.", + "bbox": [ + 114, + 699, + 489, + 763 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Zehui Chen, Kuikun Liu, Qiuchen Wang, Wenwei Zhang, Jiangning Liu, Dahua Lin, Kai Chen, and Feng Zhao. 2024. Agent-FLAN: Designing data and methods of effective agent tuning for large language models. In *Findings of the Association for Computational Linguistics: ACL* 2024, pages 9354–9366, Bangkok, Thailand. Association for Computational Linguistics.", + "bbox": [ + 114, + 776, + 489, + 882 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Tianzhe Chu, Yuexiang Zhai, Jihan Yang, Shengbang Tong, Saining Xie, Dale Schuurmans, Quoc V", + "bbox": [ + 114, + 892, + 489, + 920 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Le, Sergey Levine, and Yi Ma. 2025. Sft memorizes, rl generalizes: A comparative study of foundation model post-training. arXiv preprint arXiv:2501.17161.", + "bbox": [ + 526, + 271, + 884, + 324 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Quy-Anh Dang and Chris Ngo. 2025. Reinforcement learning for reasoning in small llms: What works and what doesn't. 
arXiv preprint arXiv:2503.16219.", + "bbox": [ + 509, + 335, + 882, + 376 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783.", + "bbox": [ + 509, + 387, + 882, + 454 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Zhibin Gou, Zhihong Shao, Yeyun Gong, Yelong Shen, Yujiu Yang, Minlie Huang, Nan Duan, and Weizhu Chen. 2023. Tora: A tool-integrated reasoning agent for mathematical problem solving. arXiv preprint arXiv:2309.17452.", + "bbox": [ + 509, + 464, + 882, + 530 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948.", + "bbox": [ + 509, + 543, + 882, + 609 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Jiangyong Huang, Silong Yong, Xiaojian Ma, Xiongkun Linghu, Puhao Li, Yan Wang, Qing Li, Song-Chun Zhu, Baoxiong Jia, and Siyuan Huang. 2023. An embodied generalist agent in 3d world. arXiv preprint arXiv:2311.12871.", + "bbox": [ + 509, + 621, + 882, + 686 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Zhen Huang, Haoyang Zou, Xuefeng Li, Yixiu Liu, Yuxiang Zheng, Ethan Chern, Shijie Xia, Yiwei Qin, Weizhe Yuan, and Pengfei Liu. 2024. O1 replication journey-part 2: Surpassing o1-preview through simple distillation, big progress or bitter lesson? arXiv preprint arXiv:2411.16489.", + "bbox": [ + 509, + 699, + 882, + 778 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Yoshitaka Inoue, Tianci Song, and Tianfan Fu. 2024. Drugagent: Explainable drug repurposing agent with large language model-based reasoning. arXiv preprint arXiv:2408.13378.", + "bbox": [ + 509, + 789, + 882, + 841 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Bowen Jin, Hansi Zeng, Zhenrui Yue, Dong Wang, Hamed Zamani, and Jiawei Han. 2025. Search-r1: Training llms to reason and leverage search engines with reinforcement learning. arXiv preprint arXiv:2503.09516.", + "bbox": [ + 509, + 854, + 882, + 919 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Minki Kang, Jongwon Jeong, and Jaewoong Cho. 2025. T1: Tool-integrated self-verification for test-time compute scaling in small language models. arXiv preprint arXiv:2504.04718.", + "Timo Kaufmann, Paul Weng, Viktor Bengs, and Eyke Hüllermeier. 2023. A survey of reinforcement learning from human feedback. arXiv preprint arXiv:2312.14925.", + "Komal Kumar, Tajamul Ashraf, Omkar Thawakar, Rao Muhammad Anwer, Hisham Cholakkal, Mubarak Shah, Ming-Hsuan Yang, Phillip HS Torr, Salman Khan, and Fahad Shahbaz Khan. 2025. Lm post-training: A deep dive into reasoning large language models. arXiv preprint arXiv:2502.21321.", + "Minghao Li, Yingxiu Zhao, Bowen Yu, Feifan Song, Hangyu Li, Haiyang Yu, Zhoujun Li, Fei Huang, and Yongbin Li. 2023. Api-bank: A comprehensive benchmark for tool-augmented llms. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 3102-3116.", + "Xuefeng Li, Haoyang Zou, and Pengfei Liu. 2025a. Limr: Less is more for rl scaling. arXiv preprint arXiv:2502.11886.", + "Xuefeng Li, Haoyang Zou, and Pengfei Liu. 2025b. 
Torl: Scaling tool-integrated rl. arXiv preprint arXiv:2503.23383.", + "Minpeng Liao, Wei Luo, Chengxi Li, Jing Wu, and Kai Fan. 2024. Mario: Math reasoning with code interpreter output-a reproducible pipeline. arXiv preprint arXiv:2401.08190.", + "Qiqiang Lin, Muning Wen, Qiuying Peng, Guanyu Nie, Junwei Liao, Jun Wang, Xiaoyun Mo, Jiamu Zhou, Cheng Cheng, Yin Zhao, et al. 2024. Hammer: Robust function-calling for on-device language models via function masking. arXiv preprint arXiv:2410.04587.", + "Chen Ling, Xujiang Zhao, Jiaying Lu, Chengyuan Deng, Can Zheng, Junxiang Wang, Tanmoy Chowdhury, Yun Li, Hejie Cui, Xuchao Zhang, et al. 2023. Domain specialization as the key to make large language models disruptive: A comprehensive survey. arXiv preprint arXiv:2305.18703.", + "Weiwen Liu, Xu Huang, Xingshan Zeng, Xinlong Hao, Shuai Yu, Dexun Li, Shuai Wang, Weinan Gan, Zhengying Liu, Yuanqing Yu, et al. 2024. Toolace: Winning the points of llm function calling. arXiv preprint arXiv:2409.00920.", + "Yu Meng, Mengzhou Xia, and Danqi Chen. 2024. Simpo: Simple preference optimization with a reference-free reward. Advances in Neural Information Processing Systems, 37:124198-124235.", + "Shishir G Patil, Tianjun Zhang, Xin Wang, and Joseph E Gonzalez. 2023. Gorilla: Large language model connected with massive apis. arXiv preprint arXiv:2305.15334." + ], + "bbox": [ + 115, + 85, + 485, + 917 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Shishir G Patil, Tianjun Zhang, Xin Wang, and Joseph E Gonzalez. 2024. Gorilla: Large language model connected with massive apis. Advances in Neural Information Processing Systems, 37:126544-126565.", + "Ofir Press, Muru Zhang, Sewon Min, Ludwig Schmidt, Noah A Smith, and Mike Lewis. 2022. Measuring and narrowing the compositionality gap in language models. arXiv preprint arXiv:2210.03350.", + "Cheng Qian, Emre Can Acikgoz, Hongru Wang, Xiusi Chen, Avirup Sil, Dilek Hakkani-Tur, Gokhan Tur, and Heng Ji. 2025. Smart: Self-aware agent for tool overuse mitigation. arXiv preprint arXiv:2502.11435.", + "Cheng Qian, Chi Han, Yi Fung, Yujia Qin, Zhiyuan Liu, and Heng Ji. 2023. Creator: Tool creation for disentangling abstract and concrete reasoning of large language models. In *Findings of the Association for Computational Linguistics: EMNLP* 2023, pages 6922-6939.", + "Cheng Qian, Peixuan Han, Qinyu Luo, Bingxiang He, Xiusi Chen, Yuji Zhang, Hongyi Du, Jiarui Yao, Xiaocheng Yang, Denghui Zhang, et al. 2024a. Escapebench: Pushing language models to think outside the box. arXiv preprint arXiv:2412.13549.", + "Cheng Qian, Chenyan Xiong, Zhenghao Liu, and Zhiyuan Liu. 2024b. Toolink: Linking toolkit creation and using through chain-of-solving on open-source model. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 831-854.", + "Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, et al. 2024a. O1 replication journey: A strategic progress report-part 1. arXiv preprint arXiv:2410.18982.", + "Yujia Qin, Shengding Hu, Yankai Lin, Weize Chen, Ning Ding, Ganqu Cui, Zheni Zeng, Yufei Huang, Chaojun Xiao, Chi Han, et al. 2023. Tool learning with foundation models. arXiv preprint arXiv.2304.08354, 10.", + "Yujia Qin, Shengding Hu, Yankai Lin, Weize Chen, Ning Ding, Ganqu Cui, Zheni Zeng, Xuanhe Zhou, Yufei Huang, Chaojun Xiao, et al. 2024b. 
Tool learning with foundation models. ACM Computing Surveys, 57(4):1-40.", + "Yujia Qin, Shihao Liang, Yining Ye, Kunlun Zhu, Lan Yan, Yaxi Lu, Yankai Lin, Xin Cong, Xiangru Tang, Bill Qian, Sihan Zhao, Lauren Hong, Runchu Tian, Ruobing Xie, Jie Zhou, Mark Gerstein, Dahai Li, Zhiyuan Liu, and Maosong Sun. 2024c. Toollm: Facilitating large language models to master 16000+ real-world apis. In The Twelfth International Conference on Learning Representations." + ], + "bbox": [ + 510, + 85, + 880, + 920 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. 2023. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741.", + "Yusuf Roohani, Andrew Lee, Qian Huang, Jian Vora, Zachary Steinhart, Kexin Huang, Alexander Marson, Percy Liang, and Jure Leskovec. 2024. Biodiscoveryagent: An ai agent for designing genetic perturbation experiments. arXiv preprint arXiv:2405.17631.", + "Timo Schick, Jane Dwivedi-Yu, Roberto Dessì, Roberta Raileanu, Maria Lomeli, Eric Hambro, Luke Zettle-moyer, Nicola Cancedda, and Thomas Scialom. 2023. Toolformer: Language models can teach themselves to use tools. Advances in Neural Information Processing Systems, 36:68539-68551.", + "John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. 2017. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347.", + "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. 2024. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300.", + "Haozhan Shen, Peng Liu, Jingcheng Li, Chunxin Fang, Yibo Ma, Jiajia Liao, Qiaoli Shen, Zilun Zhang, Kangjia Zhao, Qianqian Zhang, et al. 2025. Vlmr1: A stable and generalizable r1-style large vision-language model. arXiv preprint arXiv:2504.07615.", + "Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. 2024. Hybridflow: A flexible and efficient rlhf framework. arXiv preprint arXiv:2409.19256.", + "Huatong Song, Jinhao Jiang, Yingqian Min, Jie Chen, Zhipeng Chen, Wayne Xin Zhao, Lei Fang, and Ji-Rong Wen. 2025. R1-searcher: Incentivizing the search capability in llms via reinforcement learning. arXiv preprint arXiv:2503.05592.", + "Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. 2025. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599.", + "Qwen Team. 2024. Qwen2.5: A party of foundation models.", + "Tu Vu, Mohit Iyyer, Xuezhi Wang, Noah Constant, Jerry Wei, Jason Wei, Chris Tar, Yun-Hsuan Sung, Denny Zhou, Quoc Le, et al. 2023. Freshllms: Refreshing large language models with search engine augmentation. arXiv preprint arXiv:2310.03214." + ], + "bbox": [ + 115, + 85, + 489, + 919 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yidong Wang, Qi Guo, Wenjin Yao, Hongbo Zhang, Xin Zhang, Zhen Wu, Meishan Zhang, Xinyu Dai, Qingsong Wen, Wei Ye, et al. 2024. Autosurvey: Large language models can automatically write surveys. 
Advances in Neural Information Processing Systems, 37:115119-115145.", + "Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. 2025. Logic-rl: Unleashing llm reasoning with rule-based reinforcement learning. arXiv preprint arXiv:2502.14768.", + "Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik R Narasimhan, and Yuan Cao. 2023. React: Synergizing reasoning and acting in language models. In The Eleventh International Conference on Learning Representations.", + "Yining Ye, Xin Cong, Shizuo Tian, Yujia Qin, Chong Liu, Yankai Lin, Zhiyuan Liu, and Maosong Sun. 2023. Rational decision-making agent with internalized utility judgment. arXiv preprint arXiv:2308.12519.", + "Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. 2025. Dapo: An opensource llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476.", + "Yuanqing Yu, Zhefan Wang, Weizhi Ma, Zhicheng Guo, Jingtao Zhan, Shuai Wang, Chuhan Wu, Zhiqiang Guo, and Min Zhang. 2024. Steptool: A step-grained reinforcement learning framework for tool learning in llms. arXiv preprint arXiv:2410.07745.", + "Yufeng Yuan, Qiying Yu, Xiaochen Zuo, Ruofei Zhu, Wenyuan Xu, Jiaze Chen, Chengyi Wang, TianTian Fan, Zhengyin Du, Xiangpeng Wei, et al. 2025. Vapo: Efficient and reliable reinforcement learning for advanced reasoning tasks. arXiv preprint arXiv:2504.05118.", + "Aohan Zeng, Mingdao Liu, Rui Lu, Bowen Wang, Xiao Liu, Yuxiao Dong, and Jie Tang. 2024. AgentTuning: Enabling generalized agent abilities for LLMs. In Findings of the Association for Computational Linguistics: ACL 2024, pages 3053-3077, Bangkok, Thailand. Association for Computational Linguistics.", + "Yuanzhao Zhai, Tingkai Yang, Kele Xu, Feng Dawei, Cheng Yang, Bo Ding, and Huaimin Wang. 2024. Enhancing decision-making for llm agents via step-level q-value models. arXiv preprint arXiv:2409.09345.", + "Hongxin Zhang, Weihua Du, Jiaming Shan, Qinhong Zhou, Yilun Du, Joshua B Tenenbaum, Tianmin Shu, and Chuang Gan. 2023. Building cooperative embodied agents modularly with large language models. arXiv preprint arXiv:2307.02485.", + "Jianguo Zhang, Tian Lan, Ming Zhu, Zuxin Liu, Thai Hoang, Shirley Kokane, Weiran Yao, Juntao Tan, Akshara Prabhakar, Haolin Chen, et al. 2024. xlam: A family of large action models to empower ai agent systems. arXiv preprint arXiv:2409.03215." + ], + "bbox": [ + 510, + 85, + 882, + 919 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Yuxiang Zheng, Dayuan Fu, Xiangkun Hu, Xiaojie Cai, Lyumanshan Ye, Pengrui Lu, and Pengfei Liu. 2025. Deepresearch: Scaling deep research via reinforcement learning in real-world environments. arXiv preprint arXiv:2504.03160.", + "bbox": [ + 115, + 85, + 489, + 152 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Appendix", + "text_level": 1, + "bbox": [ + 114, + 84, + 203, + 99 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "A User Prompt Details", + "text_level": 1, + "bbox": [ + 114, + 109, + 329, + 126 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The system instruction is shown in Figure 4. The user prompt is used to store the trajectory history, including intermediate thoughts, tool calls, environment observations, and any additional user commands. 
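For illustration only, here is a minimal sketch of how such a trajectory history might be serialized into the user prompt; the function and field names are hypothetical and not taken from the paper's released code.

```python
# Hypothetical sketch: flatten a trajectory history (thoughts, tool calls,
# observations) into the text stored in the user prompt. Field names are
# illustrative, not from the released ToolRL code.
def render_history(steps: list) -> str:
    lines = []
    for step in steps:
        if step.get("think"):
            lines.append("<think>" + step["think"] + "</think>")
        for call in step.get("tool_calls", []):
            lines.append("<tool_call>" + str(call) + "</tool_call>")
        if step.get("obs"):
            lines.append("<obs>" + step["obs"] + "</obs>")
    return "\n".join(lines)
```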
The complete user instruction is presented in Figure 12.",
    "bbox": [112, 134, 489, 231],
    "page_idx": 17
  },
  {
    "type": "text",
    "text": "B Experiment Details",
    "text_level": 1,
    "bbox": [114, 242, 319, 259],
    "page_idx": 17
  },
  {
    "type": "text",
    "text": "Training Data Details. We empirically use 4K data points for training, as each dataset consists of samples drawn from the same distribution. Adding more data of similar nature does not increase task diversity. Moreover, we observe that increasing the dataset size beyond 4K does not yield noticeable improvements in training convergence or final performance, suggesting diminishing returns from additional data under this setting.",
    "bbox": [112, 267, 487, 413],
    "page_idx": 17
  },
  {
    "type": "text",
    "text": "GRPO Setting Details. For all tool calls in the dataset, we use the JSON format to represent each tool call, as it is easy to parse and is the most general and structured way of performing a tool call. For GRPO training, we use 2 A100 (80G) GPUs per run with the following hyper-parameters:",
    "bbox": [112, 420, 487, 519],
    "page_idx": 17
  },
  {
    "type": "table",
    "img_path": "images/d65267e68fdd04a19a99679ce48395fe4605cf2779afc6e66e0b2988f88b2363.jpg",
    "table_caption": [],
    "table_footnote": [],
    "table_body": "
HyperparameterValue
Data Configuration
Train Batch Size512
Validation Batch Size128
Max Prompt Length2048
Max Response Length1024
Optimization
Learning Rate1e-6
PPO Mini Batch Size128
KL Loss UsedFalse
Rollout Configuration
Rollout Namevllm
GPU Memory Utilization0.6
Number of Rollouts4
Training & Logging
Save Frequency (Steps)15
Test Frequency (Steps)5
Total Epochs15
", + "bbox": [ + 146, + 529, + 455, + 809 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Baselines. The 400 selected data points used for SFT share the same distribution as the 4k data points used for RL training, but differ in content. For SFT, each data point includes a field,", + "bbox": [ + 112, + 856, + 489, + 921 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "with thought content distilled from Deepseek-R1 trajectories. In contrast, GRPO does not require ground truth thought, as only the tool calls are used to compute rewards in the GRPO setting.", + "bbox": [ + 507, + 84, + 880, + 148 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We use 400 data points for SFT based on empirical observations that this amount is sufficient to help the raw model learn to follow our tool call format. This provides a stronger initialization and reduces the burden of learning the format from scratch during RL training. However, we also find that relying solely on SFT can lead to overfitting, which may ultimately degrade performance.", + "bbox": [ + 507, + 149, + 882, + 278 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "PPO Setting Details. We apply approximately the same parameter settings as GRPO for the PPO training. Similarly, we use 2 A100 (80G) GPUs per run with the following hyper-parameters:", + "bbox": [ + 507, + 287, + 882, + 351 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/8e17cc4deaa8d819f6fc131f70f9940360284bd7649a786eba2c64bd6b7e3e7e.jpg", + "table_caption": [ + "Table 8: Configuration for GRPO training." + ], + "table_footnote": [], + "table_body": "
HyperparameterValue
Data Configuration
Train Batch Size512
Validation Batch Size128
Max Prompt Length1024
Max Response Length512
Optimization
Actor Learning Rate1e-6
Critic Learning Rate1e-5
PPO Mini Batch Size128
PPO Micro Batch Size8
KL Coefficient0.001
Rollout Configuration
Rollout Namevllm
GPU Memory Utilization0.3
Training & Logging
Critic Warmup Steps0
Save Frequency (Steps)15
Test Frequency (Steps)5
Total Epochs15
", + "bbox": [ + 542, + 362, + 850, + 665 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 9: Configuration for PPO training.", + "bbox": [ + 556, + 674, + 833, + 689 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C Additional Results", + "text_level": 1, + "bbox": [ + 507, + 717, + 709, + 733 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We present additional results on three benchmarks, applying GRPO and PPO methods to models initialized with SFT on 4K data points. This setting serves as a \"theoretical\" upper bound, since the same 4K data is first used for SFT and subsequently reused for RL training.", + "bbox": [ + 507, + 744, + 882, + 840 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The results are shown in Table 10, Table 11, and Table 12 for BFCL, API-Bank, and Bamboogle, respectively. We compare RL training initialized with models fine-tuned on either 400 or 4K SFT data points.", + "bbox": [ + 507, + 841, + 882, + 921 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/86c8046a4208455b9d278a90cc615af285d011f3f28c85a707ecd1bb20d82e49.jpg", + "image_caption": [ + "Figure 12: The user prompt used for TIR's rollout." + ], + "image_footnote": [], + "bbox": [ + 173, + 80, + 821, + 262 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/515ed892cbd40ad8d47a38bf66845b41c9ea2e093ff999e1bf328a5899c6054a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelOverall AccNon-Live AST AccNon-Live Exec AccLive AccMulti Turn AccRelevance DetectionIrrelevance Detection
Qwen2.5-1.5B-Instruct (Raw)19.41%16.00%13.18%35.58%0.00%44.44%82.49%
Qwen2.5-1.5B-Instruct (SFT400+PPO)42.95%77.65%69.75%55.73%1.88%100.00%48.40%
Qwen2.5-1.5B-Instruct (SFT400+GRPO)40.93%70.54%60.79%56.33%1.00%94.44%58.63%
Qwen2.5-1.5B-Instruct (SFT4k+PPO)40.24%66.42%62.02%54.58%2.50%94.12%55.09%
Qwen2.5-1.5B-Instruct (SFT4k+GRPO)42.63%66.60%64.77%60.15%1.38%88.89%67.98%
Qwen2.5-3B-Instruct (Raw)33.04%42.52%40.80%53.96%1.00%64.71%56.01%
Qwen2.5-3B-Instruct (SFT400+PPO)45.80%78.29%71.09%58.76%5.12%94.12%54.70%
Qwen2.5-3B-Instruct (SFT400+GRPO)46.42%76.21%68.93%64.15%1.75%88.89%71.76%
Qwen2.5-3B-Instruct (SFT4k+PPO)48.22%77.75%73.18%64.27%5.25%94.12%66.41%
Qwen2.5-3B-Instruct (SFT4k+GRPO)47.82%75.12%69.52%68.19%2.38%77.78%76.16%
Qwen2.5-7B-Instruct (Raw)41.97%66.02%70.11%53.51%4.25%76.47%62.66%
Qwen2.5-7B-Instruct (SFT400+PPO)42.02%83.90%72.62%51.84%0.25%100%29.66%
Qwen2.5-7B-Instruct (SFT400+GRPO)39.25%80.69%74.34%46.51%0.25%100%14.19%
Qwen2.5-7B-Instruct (SFT4k+PPO)33.80%42.67%49.50%51.80%2.38%77.78%55.79%
Qwen2.5-7B-Instruct (SFT4k+GRPO)35.18%43.58%50.39%55.49%0.87%77.78%67.12%
Llama-3.2-3B-Instruct (Raw)22.09%17.44%14.57%43.85%0.00%77.78%66.07%
Llama-3.2-3B-Instruct (SFT400+PPO)41.62%68.10%69.88%52.98%3.00%94.12%56.29%
Llama-3.2-3B-Instruct (SFT400+GRPO)42.54%65.15%68.98%59.40%0.88%72.22%65.80%
Llama-3.2-3B-Instruct (SFT4k+PPO)45.41%73.71%68.46%62.27%2.50%82.35%68.75%
Llama-3.2-3B-Instruct (SFT4k+GRPO)45.50%70.69%67.70%64.73%1.00%77.78%78.85%
", + "bbox": [ + 119, + 300, + 878, + 512 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/3b949f3b4313649aa4b382e0445d2ea3d6d9a666abd057e2de2228351c351419.jpg", + "table_caption": [ + "Table 10: BFCL V3 Benchmark Results (Additional Result)" + ], + "table_footnote": [], + "table_body": "
ModelOverall AccLevel 1Level 2Level 3
Qwen2.5-1.5B-Instruct (Raw)30.65%28.32%35.82%35.11%
Qwen2.5-1.5B-Instruct (SFT400+PPO)57.12%60.9%50.75%48.85%
Qwen2.5-1.5B-Instruct (SFT400+GRPO)61.31%64.16%58.21%54.20%
Qwen2.5-1.5B-Instruct (SFT4k+PPO)61.31%64.91%56.72%52.67%
Qwen2.5-1.5B-Instruct (SFT4k+GRPO)59.46%65.16%53.73%45.04%
Qwen2.5-3B-Instruct (Raw)51.59%59.65%32.84%36.64%
Qwen2.5-3B-Instruct (SFT400+PPO)65.16%67.92%55.22%61.83%
Qwen2.5-3B-Instruct (SFT400+GRPO)62.48%68.67%58.21%45.80%
Qwen2.5-3B-Instruct (SFT4k+PPO)60.13%64.41%44.78%54.96%
Qwen2.5-3B-Instruct (SFT4k+GRPO)60.80%64.41%56.72%51.91%
Qwen2.5-7B-Instruct (Raw)62.48%70.68%49.25%44.27%
Qwen2.5-7B-Instruct (SFT400+PPO)63.15%72.43%58.21%37.4%
Qwen2.5-7B-Instruct (SFT400+GRPO)54.10%61.40%52.24%32.82%
Qwen2.5-7B-Instruct (SFT4k+PPO)59.30%61.40%40.30%61.60%
Qwen2.5-7B-Instruct (SFT4k+GRPO)52.60%56.39%34.33%50.38%
Llama-3.2-3B-Instruct (Raw)40.54%44.86%29.85%32.82%
Llama-3.2-3B-Instruct (SFT400+PPO)57.79%63.16%47.76%46.56%
Llama-3.2-3B-Instruct (SFT400+GRPO)56.78%63.60%41.79%43.51%
Llama-3.2-3B-Instruct (SFT4k+PPO)54.10%60.65%40.30%41.22%
Llama-3.2-3B-Instruct (SFT4k+GRPO)50.92%59.15%34.33%34.35%
", + "bbox": [ + 119, + 550, + 507, + 766 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/e2378971efe8c48bc5aecbb8f0825cd5d3570bcbe7aef5b317bdc0a4649d542f.jpg", + "table_caption": [ + "Table 11: API-Bank Test Results (Additional Result)" + ], + "table_footnote": [], + "table_body": "
ModelAccuracyAvg Num Tool Call
Qwen2.5-1.5B-Instruct (Raw)20.8%0.61
Qwen2.5-1.5B-Instruct (SFT400+PPO)36.8%1.06
Qwen2.5-1.5B-Instruct (SFT400+GRPO)38.4%0.96
Qwen2.5-1.5B-Instruct (SFT4k+PPO)36.8%1.06
Qwen2.5-1.5B-Instruct (SFT4k+GRPO)34.4%1.02
Qwen2.5-3B-Instruct (Raw)52.0%1.77
Qwen2.5-3B-Instruct (SFT400+PPO)43.2%1.04
Qwen2.5-3B-Instruct (SFT400+GRPO)56.8%0.99
Qwen2.5-3B-Instruct (SFT4k+PPO)46.4%1.01
Qwen2.5-3B-Instruct (SFT4k+GRPO)47.2%0.98
Qwen2.5-7B-Instruct (Raw)69.6%1.42
Qwen2.5-7B-Instruct (SFT400+PPO)45.6%3.54
Qwen2.5-7B-Instruct (SFT400+GRPO)29.6%3.70
Qwen2.5-7B-Instruct (SFT4k+PPO)40.0%1.25
Qwen2.5-7B-Instruct (SFT4k+GRPO)32.0%1.25
Llama-3.2-3B-Instruct (Raw)34.4%1.25
Llama-3.2-3B-Instruct (SFT400+PPO)39.2%1.33
Llama-3.2-3B-Instruct (SFT400+GRPO)45.6%1.00
Llama-3.2-3B-Instruct (SFT4k+PPO)49.6%1.02
Llama-3.2-3B-Instruct (SFT4k+GRPO)42.4%1.03
", + "bbox": [ + 534, + 550, + 880, + 766 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Table 12: Bamboogle Test Results (Additional Result)", + "bbox": [ + 529, + 776, + 884, + 804 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Interestingly, our findings suggest that initializing from a model finetuned on 4K data does not consistently outperform initialization from a model finetuned on only 400 data points. In the BFCL benchmark, we even observe cases where perfor", + "bbox": [ + 112, + 831, + 490, + 910 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "mance drops below that of the raw instruct model. This counterintuitive result may stem from overfitting during the SFT phase, which could restrict the model's ability to explore during RL training and lead to poorer generalization on held-out tasks.", + "bbox": [ + 507, + 831, + 884, + 912 + ], + "page_idx": 18 + } +] \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13958/a08029cb-87cf-4923-9173-7614aa70a4ec_model.json b/data/2025/2504_13xxx/2504.13958/a08029cb-87cf-4923-9173-7614aa70a4ec_model.json new file mode 100644 index 0000000000000000000000000000000000000000..6b18f9eed3a3895cf7533da2f19e6336598b643e --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/a08029cb-87cf-4923-9173-7614aa70a4ec_model.json @@ -0,0 +1,4187 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.272, + 0.091, + 0.728, + 0.112 + ], + "angle": 0, + "content": "ToolRL: Reward is All Tool Learning Needs" + }, + { + "type": "title", + "bbox": [ + 0.21, + 0.14, + 0.795, + 0.174 + ], + "angle": 0, + "content": "Cheng Qian, Emre Can Acikgoz, Qi He, Hongru Wang, Xiusi Chen, Dilek Hakkani-Tür, Gokhan Tur, Heng Ji" + }, + { + "type": "text", + "bbox": [ + 0.332, + 0.175, + 0.667, + 0.19 + ], + "angle": 0, + "content": "University of Illinois Urbana-Champaign" + }, + { + "type": "text", + "bbox": [ + 0.348, + 0.191, + 0.652, + 0.207 + ], + "angle": 0, + "content": "{chengq9, hengji}@illinois.edu" + }, + { + "type": "title", + "bbox": [ + 0.261, + 0.261, + 0.341, + 0.276 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.142, + 0.291, + 0.461, + 0.745 + ], + "angle": 0, + "content": "Current Large Language Models (LLMs) often undergo supervised fine-tuning (SFT) to acquire tool use capabilities. However, SFT struggles to generalize to unfamiliar or complex tool use scenarios. Recent advancements in reinforcement learning (RL), particularly with R1-like models, have demonstrated promising reasoning and generalization abilities. Yet, reward design for tool use presents unique challenges: multiple tools may be invoked with diverse parameters, and coarse-grained reward signals, such as answer matching, fail to offer the fine-grained feedback required for effective learning. In this work, we present the first comprehensive study on reward design for tool selection and application tasks within the RL paradigm. We systematically explore a wide range of reward strategies, analyzing their types, scales, granularity, and temporal dynamics. Building on these insights, we propose a principled reward design tailored for tool use tasks and apply it to train LLMs using Group Relative Policy Optimization (GRPO). Empirical evaluations across diverse benchmarks demonstrate that our approach yields robust, scalable, and stable training, achieving a \\(17\\%\\) improvement over base models and a \\(15\\%\\) gain over SFT models. 
These results highlight the critical role of thoughtful reward design in enhancing the tool use capabilities and generalization performance of LLMs. All the code are released to facilitate future research.\\(^{1}\\)" + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.761, + 0.26, + 0.777 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.788, + 0.49, + 0.886 + ], + "angle": 0, + "content": "Recent advances in Large Language Models (LLMs) have showcased remarkable capabilities in complex reasoning tasks (Kumar et al., 2025). Among the techniques that have significantly contributed to this progress, Reinforcement Learning (RL) has emerged as a powerful paradigm, enabling" + }, + { + "type": "title", + "bbox": [ + 0.55, + 0.262, + 0.846, + 0.273 + ], + "angle": 0, + "content": "Task Goal: Irrelevant Tool Detection (LLM should reject in appropriate tools)" + }, + { + "type": "image", + "bbox": [ + 0.536, + 0.275, + 0.558, + 0.292 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.565, + 0.279, + 0.856, + 0.289 + ], + "angle": 0, + "content": "What's the distance between San Francisco and Los Angeles in kilometers?" + }, + { + "type": "title", + "bbox": [ + 0.546, + 0.296, + 0.824, + 0.305 + ], + "angle": 0, + "content": "{Name: get_date, Parameters: {location_1: str, location_2: str, unit: str}}" + }, + { + "type": "title", + "bbox": [ + 0.536, + 0.314, + 0.582, + 0.322 + ], + "angle": 0, + "content": "SFT Model" + }, + { + "type": "title", + "bbox": [ + 0.536, + 0.322, + 0.668, + 0.33 + ], + "angle": 0, + "content": "(Distilled from R1 Long Thought)" + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.332, + 0.729, + 0.376 + ], + "angle": 0, + "content": " The user wants to find the distance in kilometers between San Francisco and Los Angeles. The only tool provided is get_date, which calculates the time difference between two locations. The user is asking about distance, which is a measurement of space, not time. But wait, maybe there's a" + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.376, + 0.718, + 0.385 + ], + "angle": 0, + "content": "space, not often. But, maybe it's a misunderstanding Over-Interpretation! perhaps" + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.385, + 0.709, + 0.4 + ], + "angle": 0, + "content": "the tool can still be used if possible. Maybe the tool's parameters are misnamed, and it actually" + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.4, + 0.728, + 0.423 + ], + "angle": 0, + "content": "returns distance. Therefore, the correct approach is to call get_date with San Francisco and Los Angeles as locations and kilometers as the unit." 
+ }, + { + "type": "text", + "bbox": [ + 0.536, + 0.425, + 0.703, + 0.433 + ], + "angle": 0, + "content": " {Name: get date, Parameters:" + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.433, + 0.703, + 0.441 + ], + "angle": 0, + "content": "location 1: San Francisco, location 2: Los" + }, + { + "type": "text", + "bbox": [ + 0.536, + 0.441, + 0.689, + 0.45 + ], + "angle": 0, + "content": "\\(\\text{Angeles,unit:kilometers}\\} \\} < /\\) tool_call>" + }, + { + "type": "title", + "bbox": [ + 0.743, + 0.314, + 0.785, + 0.322 + ], + "angle": 0, + "content": "RL Model" + }, + { + "type": "title", + "bbox": [ + 0.743, + 0.322, + 0.843, + 0.33 + ], + "angle": 0, + "content": "(Trained through GRPO)" + }, + { + "type": "text", + "bbox": [ + 0.743, + 0.33, + 0.855, + 0.376 + ], + "angle": 0, + "content": " The user wants to find the distance between San Francisco and Los Angeles in kilometers. However, the available tool 'get_date' is not suitable for calculating" + }, + { + "type": "title", + "bbox": [ + 0.743, + 0.376, + 0.819, + 0.385 + ], + "angle": 0, + "content": "distances." + }, + { + "type": "text", + "bbox": [ + 0.743, + 0.385, + 0.852, + 0.449 + ], + "angle": 0, + "content": "The tool get_date'is not suitable for calculating the distance between two cities.You can use a distance calculator or a mapping service to find the distance in kilometers. " + }, + { + "type": "image_caption", + "bbox": [ + 0.509, + 0.464, + 0.882, + 0.493 + ], + "angle": 0, + "content": "Figure 1: SFT on distilled deep-thinking trajectories suffers from overthinking and limited generalization." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.519, + 0.885, + 0.647 + ], + "angle": 0, + "content": "LLMs to develop emergent capabilities such as self-reflection, self-correction, and long-horizon planning (Guo et al., 2025; Team et al., 2025). These capabilities have been instrumental in the success of models like o1 and R1, particularly in mathematical and logical reasoning domains (Qin et al., 2024a; Huang et al., 2024; Li et al., 2025b; Kang et al., 2025)." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.648, + 0.885, + 0.889 + ], + "angle": 0, + "content": "Beyond traditional reasoning tasks, an increasingly important area is Tool-Integrated Reasoning (TIR). TIR involves LLMs interacting with external tools, such as search engines (Jin et al., 2025; Zheng et al., 2025), calculators (Chen et al., 2023b; Qin et al., 2023), or code interpreters (Gou et al., 2023; Liao et al., 2024), in a multi-step, feedback-driven loop to arrive at solutions. TIR is particularly important because it addresses core limitations of LLMs, such as outdated knowledge, calculation inaccuracy, and shallow reasoning. By integrating external tools that offer real-time access and specialized capabilities, TIR enables models to tackle complex tasks in a more grounded and goal-directed way." 
+ }, + { + "type": "text", + "bbox": [ + 0.509, + 0.89, + 0.884, + 0.92 + ], + "angle": 0, + "content": "Unlike textual reasoning, which primarily involves deduction and inference from static text," + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.307, + 0.061, + 0.724 + ], + "angle": 270, + "content": "arXiv:2504.13958v1 [cs.LG] 16 Apr 2025" + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.895, + 0.488, + 0.92 + ], + "angle": 0, + "content": "1 Data and codes released at https://github.com/qiancheng@/ToolRL" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.116, + 0.083, + 0.49, + 0.161 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.116, + 0.161, + 0.49, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.116, + 0.23, + 0.49, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.094, + 0.883, + 0.194 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.499, + 0.218, + 0.88, + 0.317 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.34, + 0.885, + 0.385 + ], + "angle": 0, + "content": "Figure 2: Main results (left) and reward trends over training steps for GRPO Cold Start across four models (right). GRPO Cold Start, equipped with our proposed reward design, consistently achieves the highest performance, with reward curves showing a rapid increase during training." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.409, + 0.49, + 0.603 + ], + "angle": 0, + "content": "TIR additionally demands the model's ability to select appropriate tools, interpret intermediate outputs, and adaptively refine its trajectory on the fly. These dynamic and interactive reasoning skills position TIR at the core of the emerging paradigm of LLMs-as-agents. As such, TIR enables a wide range of applications, including scientific discovery (Roohani et al., 2024; Inoue et al., 2024), research automation (Baek et al., 2024; Wang et al., 2024), embodied task completion (Zhang et al., 2023; Huang et al., 2023), and everyday decision-making (Ye et al., 2023; Zhai et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.616, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Training LLMs for TIR tasks has predominantly relied on Supervised Fine-Tuning (SFT), wherein existing approaches typically generate these integrated reasoning steps offline, followed by subsequent SFT on these trajectories (Chen et al., 2023a; Zeng et al., 2024; Chen et al., 2024; Acikgoz et al., 2025). While SFT is effective to some extent, it struggles with generalization, exploration, and adaptability (Chu et al., 2025; Guo et al., 2025). As illustrated in Figure 1, a model trained with SFT on deep-thinking trajectories over-interprets the tool and fails to reject the inappropriate tool, merely imitating cues like \"but wait\" without engaging in genuine deep thinking. As such, SFT often fails to capture the strategic flexibility needed for optimal tool use, particularly in open-ended or multi-step settings. This motivates a fundamental research question: Can RL-based training methods better equip LLMs with agentic tool-using capabilities," + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.41, + 0.874, + 0.426 + ], + "angle": 0, + "content": "and if so, what is the optimal RL design for TIR?" 
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.43, + 0.885, + 0.575 + ], + "angle": 0, + "content": "Recent efforts such as Search-R1 (Jin et al., 2025) and TORL (Li et al., 2025b) have begun to explore this direction. However, their focus is narrow: either constrained to search tools in question answering settings or code tools in math problem-solving. In contrast, our work aims to study RL-based training for general-purpose tool selection and application, across diverse and complex tool sets with different task types." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.579, + 0.885, + 0.902 + ], + "angle": 0, + "content": "For an RL algorithm to be effective, a well-designed reward is essential. Unlike math tasks with a single correct answer, Tool-Integrated Reasoning (TIR) tasks introduce multiple layers of complexity: they often involve multi-step interactions where each turn may require invoking multiple tools, each with carefully specified parameters. Designing effective reward signals to guide learning through this complexity remains an open and underexplored challenge. In this paper, we focus on the problem of reward design for TIR and propose a principled, generalizable framework that can be applied across various RL algorithms. While our reward design is algorithm-agnostic by nature, we empirically demonstrate its effectiveness using both Group Relative Policy Optimization (GRPO) (Shao et al., 2024) and Proximal Policy Optimization (PPO) (Schulman et al., 2017), showcasing its versatility and impact on improving tool use performance." + }, + { + "type": "text", + "bbox": [ + 0.528, + 0.906, + 0.885, + 0.922 + ], + "angle": 0, + "content": "We begin by formalizing the TIR task, and out" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.49, + 0.262 + ], + "angle": 0, + "content": "lining general principles for effective reward design. Building on this foundation, we show how RL algorithm can be leveraged to train LLMs for robust and context-aware tool selection and application. Empirical results demonstrate that our approach outperforms base models by \\(17\\%\\) and SFT models by \\(15\\%\\) across multiple tool use and QA benchmarks. Moreover, the trained model exhibits strong generalization to unseen scenarios and task objectives, along with emergent behaviors such as proactiveness and metacognitive reasoning." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.263, + 0.49, + 0.454 + ], + "angle": 0, + "content": "To identify optimal reward strategies, we next systematically explore a broad spectrum of reward configurations across four key dimensions: (1) reward type (what aspect to reward), (2) reward scale (how much to reward), (3) reward granularity (how detailed the reward signal is), and (4) reward dynamics (how rewards evolve over time). Through extensive experiments, we identify reward designs that best align with agentic tool use and uncover insights into what makes a reward \"useful\" for tool invoking LLMs. We summarize the core insights we derive as follows:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.458, + 0.486, + 0.489 + ], + "angle": 0, + "content": "- Longer reasoning trace is not inherently better and length rewards can degrade performance." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.494, + 0.485, + 0.526 + ], + "angle": 0, + "content": "- Dynamic reward scale helps models transition smoothly from simple to complex behaviors." 
+ }, + { + "type": "text", + "bbox": [ + 0.117, + 0.53, + 0.485, + 0.561 + ], + "angle": 0, + "content": "- Finegrained reward decomposition leads to more stable and effective learning." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.458, + 0.486, + 0.561 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.565, + 0.489, + 0.597 + ], + "angle": 0, + "content": "We also summarize the overall contributions of our paper as follows:" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.6, + 0.486, + 0.647 + ], + "angle": 0, + "content": "- We present the first systematic study on RL-based training for general-purpose tool selection and application in LLMs." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.652, + 0.488, + 0.699 + ], + "angle": 0, + "content": "- We propose a principled reward design framework tailored for TIR and validate its effectiveness through RL algorithms including GRPO." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.703, + 0.488, + 0.767 + ], + "angle": 0, + "content": "- We conduct extensive experiments analyzing the effects of various reward strategies and distill actionable insights for future research on LLM-agent training." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.6, + 0.488, + 0.767 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.77, + 0.489, + 0.835 + ], + "angle": 0, + "content": "This work pioneers the application of RL to general TIR and provides the first empirical roadmap for reward design in TIR, paving the way toward more capable and autonomous LLM agents." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.847, + 0.271, + 0.863 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.873, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Tool-Integrated Reasoning of LLMs. Tool-integrated reasoning (TIR) has emerged as a promising approach to enhance the capabilities of" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.886, + 0.504 + ], + "angle": 0, + "content": "LLMs. Early studies introduced the concept of equipping LLMs with external tools to overcome their inherent limitations (Schick et al., 2023; Qin et al., 2024b; Yao et al., 2023), such as program executors (Chen et al., 2022) and search engines (Vu et al., 2023). To systematically assess these enhanced capabilities, several benchmarks have been proposed to evaluate tool use performance across various dimensions, including API selection, argument generation, and generalization (Qin et al., 2024c; Patil et al., 2023; Qian et al., 2024a). Building on this foundation, subsequent research has focused on constructing high-quality tool use datasets (Liu et al., 2024; Qian et al., 2025), enabling models to autonomously create and invoke tools (Qian et al., 2023, 2024b), and applying these techniques to problems spanning different modalities (Shen et al., 2025) and specialized domains (Ling et al., 2023). More recently, reinforcement learning (RL) has been explored as an effective framework to further improve TIR, demonstrating success in tasks such as information retrieval (Jin et al., 2025) and math computation (Li et al., 2025b). These advances collectively highlight the growing potential of tool-augmented LLMs for general-purpose reasoning in open-domain settings." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.518, + 0.886, + 0.84 + ], + "angle": 0, + "content": "Exploration of RL in LLMs. 
Previous work has primarily relied on supervised fine-tuning (SFT) with carefully curated datasets to enhance LLM performance in tool use (Schick et al., 2023; Qin et al., 2024c). Recently, reinforcement learning (RL) has gained traction as a more scalable and generalizable training paradigm. The development of RL methods for LLMs has evolved from reinforcement learning from human feedback (RLHF) (Kaufmann et al., 2023) and proximal policy optimization (PPO) (Schulman et al., 2017) to more advanced techniques such as direct preference optimization (DPO) (Rafailov et al., 2023), SimPO (Meng et al., 2024), and group relative policy optimization (GRPO) (Shao et al., 2024). Extensions like dynamic sampling policy optimization (DAPO) (Yu et al., 2025) and the more recent value-based augmented proximal policy optimization (VAPO) (Yuan et al., 2025) further improve training stability and efficiency." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.842, + 0.886, + 0.922 + ], + "angle": 0, + "content": "Among these, GRPO (Shao et al., 2024) is specifically designed for LLMs, replacing the traditional critic with a group-based evaluation strategy. It has shown strong performance in enhancing reasoning abilities across a range of tasks, including math-" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.49, + 0.31 + ], + "angle": 0, + "content": "ematical problem solving (Shao et al., 2024; Xie et al., 2025), search engine interaction (Jin et al., 2025; Song et al., 2025), and code generation (Li et al., 2025b). Beyond task variety, recent studies have analyzed the influence of dataset scale (Li et al., 2025a) and GRPO's effectiveness in smaller model settings (Dang and Ngo, 2025). GRPO's flexible reward function enables adaptation to diverse objectives, such as assigning weights to subtasks (Yu et al., 2024) or constraining tool use frequency (Li et al., 2025b). In this work, we extend GRPO to enhance general tool use capabilities, improving LLMs' ability to select and interact with external tools across a wide range of scenarios." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.322, + 0.219, + 0.337 + ], + "angle": 0, + "content": "3 Method" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.348, + 0.49, + 0.557 + ], + "angle": 0, + "content": "Supervised fine-tuning (SFT), as illustrated in Figure 1, often suffers from overfitting to certain patterns and constrains the model's ability to learn optimal strategies for tool use. To address this, we introduce a reinforcement learning (RL) approach for enhancing tool-integrated reasoning (TIR) in LLMs. In this section, we begin by defining the TIR task (Section 3.1), followed by our customized rollout strategy (Section 3.2) and reward design (Section 3.3). These components are then integrated into the Group Relative Policy Optimization (GRPO) framework (Shao et al., 2024) to guide model training on general TIR tasks (Section 3.4)." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.568, + 0.282, + 0.582 + ], + "angle": 0, + "content": "3.1 Task Definition" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.589, + 0.49, + 0.7 + ], + "angle": 0, + "content": "Tool-Integrated Reasoning (TIR) is the process of incorporating external tools into the reasoning trajectory of an LLM to solve a user task. A typical TIR trajectory involves multiple tool invocations over several reasoning steps, with the final outcome determined by the cumulative success of these intermediate decisions." 
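To make this structure concrete before the formal definition that follows, the sketch below shows one way such a trajectory could be represented in code; it is an illustrative data structure under our reading of the formalization, not an implementation from the paper (names like `TIRStep` and `get_distance` are hypothetical).

```python
from dataclasses import dataclass, field

# Hypothetical sketch of a single TIR step (r_i, T_i, o_i): the model's
# natural-language reasoning, the tool calls it issues, and the observation
# returned after executing those calls.
@dataclass
class TIRStep:
    reasoning: str                                  # r_i: natural language thought
    tool_calls: list = field(default_factory=list)  # T_i: parameterized invocations
    observation: str = ""                           # o_i: environment/user feedback

# A trajectory s_k is the ordered list of steps taken so far.
trajectory = [
    TIRStep(
        reasoning="I need the distance between the two cities first.",
        tool_calls=[{"name": "get_distance",
                     "parameters": {"from": "San Francisco", "to": "Los Angeles"}}],
        observation="559 km",
    ),
]
```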
+ }, + { + "type": "text", + "bbox": [ + 0.113, + 0.702, + 0.49, + 0.751 + ], + "angle": 0, + "content": "Formally, given a tool set \\(\\mathcal{T} = \\{t_1,t_2,\\dots ,t_n\\}\\) containing \\(n\\) available tools, and a user query \\(Q\\) the reasoning trajectory up to step \\(k\\) is denoted as:" + }, + { + "type": "equation", + "bbox": [ + 0.122, + 0.763, + 0.48, + 0.781 + ], + "angle": 0, + "content": "\\[\ns _ {k} = (r _ {1}, \\mathcal {T} _ {1}, o _ {1}), (r _ {2}, \\mathcal {T} _ {2}, o _ {2}), \\ldots , (r _ {k}, \\mathcal {T} _ {k}, o _ {k}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.793, + 0.489, + 0.873 + ], + "angle": 0, + "content": "where \\( r_i \\) denotes the model's natural language reasoning at step \\( i \\), \\( \\mathcal{T}_i \\subseteq \\mathcal{T} \\) denotes the set of tool calls invoked at step \\( i \\), and \\( o_i \\) denotes the observation received after executing tools in \\( \\mathcal{T}_i \\), possibly including both environment and user feedback." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.875, + 0.49, + 0.922 + ], + "angle": 0, + "content": "At each step \\(k + 1\\), the model must generate the next reasoning step \\(r_{k + 1}\\), select a set of tools \\(\\mathcal{T}_{k + 1} \\subseteq \\mathcal{T}\\), and formulate a grounded tool call (i.e.," + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.882, + 0.117 + ], + "angle": 0, + "content": "a parameterized invocation of each tool) to make progress toward solving \\( Q \\)." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.118, + 0.882, + 0.18 + ], + "angle": 0, + "content": "The model's policy is defined as \\(\\pi : s_k \\to (r_{k+1}, \\mathcal{T}_{k+1})\\), where the model's objective at each step is to select a tool set \\(\\mathcal{T}_{k+1}\\) that maximizes the immediate reward:" + }, + { + "type": "equation", + "bbox": [ + 0.547, + 0.194, + 0.846, + 0.218 + ], + "angle": 0, + "content": "\\[\n\\mathcal {T} _ {k + 1} ^ {*} = \\arg \\max _ {\\mathcal {T} _ {k + 1} \\subseteq \\mathcal {T}} R (s _ {k}, \\mathcal {T} _ {k + 1}, o _ {k + 1}),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.231, + 0.885, + 0.263 + ], + "angle": 0, + "content": "where \\(R(\\cdot)\\) represents the reward function that evaluates progress made by invoking the tools in \\(\\mathcal{T}_{k + 1}\\)." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.264, + 0.884, + 0.327 + ], + "angle": 0, + "content": "While the immediate reward at each step is maximized, the model's policy is implicitly optimized to maximize the cumulative reward over the entire trajectory, formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.562, + 0.338, + 0.83, + 0.382 + ], + "angle": 0, + "content": "\\[\n\\max _ {\\pi} \\mathbb {E} _ {\\pi} \\left[ \\sum_ {k = 1} ^ {K} R \\left(s _ {k}, \\mathcal {T} _ {k + 1}, o _ {k + 1}\\right) \\right],\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.392, + 0.885, + 0.585 + ], + "angle": 0, + "content": "This formulation is valid because our training data includes ground truth tool calls at each step, allowing step-wise reward signals to guide multi-step success. Unlike QA tasks that focus solely on the final answer, tool selection and application tasks provide dense intermediate feedback. Moreover, we later demonstrate that our method enables the model to generalize to settings where tool calls are free-form and only the final outcome matters. 
Therefore, our task setting encourages the model to optimize tool use at each step while aligning with the overall task goal."
      },
      {
        "type": "title",
        "bbox": [0.509, 0.595, 0.654, 0.609],
        "angle": 0,
        "content": "3.2 TIR Rollout"
      },
      {
        "type": "text",
        "bbox": [0.508, 0.617, 0.884, 0.76],
        "angle": 0,
        "content": "To enable the model to autonomously generate reasoning traces and tool calls, we utilize a system prompt as shown in Figure 4 during rollout. The Tool List placeholder denotes the tool set \\(\\mathcal{T}\\), which contains all tools available for invocation. We indicate in the instruction that the LLM should use the special tokens <think>, <tool_call>, and <response> to mark its thoughts, tool calls, and responses in the output."
      },
      {
        "type": "text",
        "bbox": [0.508, 0.761, 0.885, 0.922],
        "angle": 0,
        "content": "As illustrated in Figure 3, when the model output includes <tool_call>, we automatically parse the tool calls into individual invocations using the model-predicted parameters. The outputs from executions are then inserted into the <obs> field and appended to the dialogue history, whose format is shown in Figure 12, serving as the model's interaction trajectory. Similarly, if the output contains <response>, the corresponding response is parsed and appended to the dialogue history."
      }
    ],
    [
      {
        "type": "image",
        "bbox": [0.117, 0.082, 0.885, 0.273],
        "angle": 0,
        "content": null
      },
      {
        "type": "image_caption",
        "bbox": [0.208, 0.285, 0.788, 0.3],
        "angle": 0,
        "content": "Figure 3: Illustration of TIR rollout and calculation of format and correctness reward."
      },
      {
        "type": "text",
        "bbox": [0.113, 0.326, 0.49, 0.422],
        "angle": 0,
        "content": "It is important to note that <tool_call> and <response> are not mutually exclusive; they may co-occur within a single output. The user's initial query \\( Q \\) is placed in the Initial User Input placeholder, and any subsequent user inputs are also appended to the dialogue history when present."
      },
      {
        "type": "title",
        "bbox": [0.114, 0.435, 0.282, 0.451],
        "angle": 0,
        "content": "3.3 Reward Design"
      },
      {
        "type": "text",
        "bbox": [0.113, 0.458, 0.49, 0.681],
        "angle": 0,
        "content": "Rule-based reward mechanisms have demonstrated strong empirical performance and are commonly employed. In our training, we similarly adopt a reward formulation that combines structural and correctness-based components, in line with prior works (Jin et al., 2025; Li et al., 2025b; Xie et al., 2025). Specifically, the format reward assesses whether the model output adheres to the expected structure including thoughts, tool calls, and responses, while the correctness reward evaluates the accuracy of tool invocations. Formally, the overall reward \\( R_{\\mathrm{final}}(\\cdot) \\) is decomposed into two components: \\( R_{\\mathrm{format}} + R_{\\mathrm{correct}} \\), each described in detail below:"
      },
      {
        "type": "text",
        "bbox": [0.114, 0.695, 0.489, 0.759],
        "angle": 0,
        "content": "Format Reward. 
The format reward \\(\\mathcal{R}_{\\mathrm{format}} \\in \\{0,1\\}\\) checks whether the model output contains all required special tokens in the correct order as specified by the ground truth:"
      },
      {
        "type": "equation",
        "bbox": [0.114, 0.786, 0.444, 0.844],
        "angle": 0,
        "content": "\\[\n\\mathcal{R}_{\\text{format}} = \\left\\{ \\begin{array}{ll} 1, & \\text{if all required fields appear} \\\\ & \\text{and are in the correct order} \\\\ 0, & \\text{otherwise} \\end{array} \\right.\n\\]"
      },
      {
        "type": "text",
        "bbox": [0.114, 0.858, 0.489, 0.922],
        "angle": 0,
        "content": "Correctness Reward. The correctness reward \\(\\mathcal{R}_{\\mathrm{correct}} \\in [-3, 3]\\) evaluates predicted tool calls \\(P = \\{P_1, \\dots, P_m\\}\\) against ground-truth calls \\(G = \\{G_1, \\dots, G_n\\}\\). It includes three components:"
      },
      {
        "type": "text",
        "bbox": [0.512, 0.326, 0.692, 0.341],
        "angle": 0,
        "content": "- Tool Name Matching:"
      },
      {
        "type": "equation",
        "bbox": [0.614, 0.35, 0.795, 0.379],
        "angle": 0,
        "content": "\\[\nr_{\\text{name}} = \\frac{\\left| N_{G} \\cap N_{P} \\right|}{\\left| N_{G} \\cup N_{P} \\right|} \\in [0, 1]\n\\]"
      },
      {
        "type": "text",
        "bbox": [0.525, 0.387, 0.882, 0.435],
        "angle": 0,
        "content": "where \\(N_{G}\\) and \\(N_{P}\\) are the sets of tool names extracted from the ground-truth and predicted tool calls, respectively."
      },
      {
        "type": "text",
        "bbox": [0.512, 0.437, 0.736, 0.452],
        "angle": 0,
        "content": "- Parameter Name Matching:"
      },
      {
        "type": "equation",
        "bbox": [0.549, 0.46, 0.858, 0.495],
        "angle": 0,
        "content": "\\[\nr_{\\text{param}} = \\sum_{G_{j} \\in G} \\frac{|\\mathrm{keys}(P_{G}) \\cap \\mathrm{keys}(P_{P})|}{|\\mathrm{keys}(P_{G}) \\cup \\mathrm{keys}(P_{P})|} \\in [0, |G|]\n\\]"
      },
      {
        "type": "text",
        "bbox": [0.525, 0.505, 0.884, 0.554],
        "angle": 0,
        "content": "where \\( \\mathrm{keys}(P_G) \\) and \\( \\mathrm{keys}(P_P) \\) represent the parameter names of the ground-truth and predicted tool calls, respectively."
      },
      {
        "type": "text",
        "bbox": [0.512, 0.555, 0.75, 0.57],
        "angle": 0,
        "content": "- Parameter Content Matching:"
      },
      {
        "type": "equation",
        "bbox": [0.567, 0.579, 0.839, 0.643],
        "angle": 0,
        "content": "\\[\n\\begin{array}{l} r_{\\text{value}} = \\sum_{G_{j} \\in G} \\sum_{k \\in \\mathrm{keys}(G_{j})} \\mathbb{1}\\left[ P_{G}[k] = P_{P}[k] \\right] \\\\ \\in [0, \\sum_{G_{j} \\in G} |\\mathrm{keys}(G_{j})|] \\end{array}\n\\]"
      },
      {
        "type": "text",
        "bbox": [0.525, 0.652, 0.882, 0.699],
        "angle": 0,
        "content": "where \\(P_{G}[k]\\) and \\(P_{P}[k]\\) represent the values of the parameters for the ground-truth and predicted tool calls, respectively (see the illustrative sketch below)."
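As a rough illustration of these three components, the following sketch computes the per-pair summands for one aligned (ground-truth, predicted) call pair; it simplifies away the optimal set matching described next and is not the authors' implementation.

```python
# Illustrative sketch (not the authors' code) of the three matching scores for
# ONE ground-truth call g aligned with ONE predicted call p, each a dict like
# {"name": str, "parameters": dict}. The paper additionally searches for the
# optimal alignment between the predicted and ground-truth call sets.
def match_scores(g: dict, p: dict):
    r_name = 1.0 if g["name"] == p["name"] else 0.0            # per-pair tool name match
    kg, kp = set(g["parameters"]), set(p["parameters"])
    r_param = len(kg & kp) / len(kg | kp) if kg | kp else 1.0  # parameter-name Jaccard
    r_value = sum(k in p["parameters"] and p["parameters"][k] == v
                  for k, v in g["parameters"].items())         # exact parameter contents
    return r_name, r_param, r_value
```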
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.702, + 0.796, + 0.716 + ], + "angle": 0, + "content": "- Total match score for each match is:" + }, + { + "type": "equation", + "bbox": [ + 0.572, + 0.727, + 0.836, + 0.742 + ], + "angle": 0, + "content": "\\[\nr _ {\\text {m a t c h}} = r _ {\\text {n a m e}} + r _ {\\text {p a r a m}} + r _ {\\text {v a l u e}} \\in [ 0, S _ {\\max} ]\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.752, + 0.881, + 0.785 + ], + "angle": 0, + "content": "where \\( S_{\\mathrm{max}} = 1 + |G| + \\sum_{G_j \\in G} |\\mathrm{keys}(G_j)| \\) denotes the maximum possible score." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.795, + 0.882, + 0.841 + ], + "angle": 0, + "content": "The total score is computed by finding the optimal matching between \\( P \\) and \\( G \\) to maximize the total match score:" + }, + { + "type": "equation", + "bbox": [ + 0.567, + 0.849, + 0.824, + 0.882 + ], + "angle": 0, + "content": "\\[\n\\mathcal {R} _ {\\text {c o r r e c t}} = 6 \\cdot \\frac {R _ {\\max}}{S _ {\\max}} - 3 \\in [ - 3, 3 ]\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.89, + 0.882, + 0.922 + ], + "angle": 0, + "content": "where \\( R_{\\mathrm{max}} \\) denotes the total match score from the optimal matching. The final correctness reward" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.198, + 0.086, + 0.375, + 0.099 + ], + "angle": 0, + "content": "System Prompt for Training" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.106, + 0.8, + 0.134 + ], + "angle": 0, + "content": "You are a helpful dialogue assistant capable of leveraging tool calls to solve user tasks and provide structured chat responses." + }, + { + "type": "title", + "bbox": [ + 0.197, + 0.147, + 0.303, + 0.159 + ], + "angle": 0, + "content": "Available Tools" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.161, + 0.51, + 0.174 + ], + "angle": 0, + "content": "In your response, you can use the following tools:" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.175, + 0.283, + 0.188 + ], + "angle": 0, + "content": "{{ToolList}}" + }, + { + "type": "title", + "bbox": [ + 0.197, + 0.201, + 0.334, + 0.214 + ], + "angle": 0, + "content": "Steps for Each Turn" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.215, + 0.619, + 0.228 + ], + "angle": 0, + "content": "1. Think: Recall relevant context and analyze the current user goal." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.229, + 0.7, + 0.242 + ], + "angle": 0, + "content": "2. Decide on Tool Usage: If a tool is needed, specify the tool and its parameters." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.242, + 0.8, + 0.269 + ], + "angle": 0, + "content": "3. Respond Appropriately: If a response is needed, generate one while maintaining consistency across user queries." + }, + { + "type": "list", + "bbox": [ + 0.197, + 0.215, + 0.8, + 0.269 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.197, + 0.282, + 0.304, + 0.295 + ], + "angle": 0, + "content": "Output Format" + }, + { + "type": "code", + "bbox": [ + 0.196, + 0.296, + 0.8, + 0.391 + ], + "angle": 0, + "content": " Your thoughts and reasoning \n \n{“name”: “Tool name”, “parameters”: {“Parameter name”: “Parameter content”, “... ...”: “... ...”} \n{“name”: “... ...”, “parameters”: {“... ...”: “... ...”, “... ...”: “... ...”} \n... 
\n \n AI's final response " + }, + { + "type": "title", + "bbox": [ + 0.197, + 0.404, + 0.312, + 0.417 + ], + "angle": 0, + "content": "Important Notes" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.418, + 0.802, + 0.457 + ], + "angle": 0, + "content": "1. You must always include the field to outline your reasoning. Provide at least one of or . Decide whether to use (possibly multiple times), , or both." + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.458, + 0.802, + 0.499 + ], + "angle": 0, + "content": "2. You can invoke multiple tool calls simultaneously in the fields. Each tool call should be a JSON object with a \"name\" field and a \"parameters\" field containing a dictionary of parameters. If no parameters are needed, leave the \"parameters\" field an empty dictionary." + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.499, + 0.802, + 0.526 + ], + "angle": 0, + "content": "3. Refer to the previous dialogue records in the history, including the user's queries, previous , , and any tool feedback noted as (if exists)." + }, + { + "type": "list", + "bbox": [ + 0.196, + 0.418, + 0.802, + 0.526 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.32, + 0.556, + 0.675, + 0.571 + ], + "angle": 0, + "content": "Figure 4: The system prompt used for TIR's rollout." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.597, + 0.489, + 0.66 + ], + "angle": 0, + "content": "\\(\\mathcal{R}_{\\mathrm{correct}}\\) is the normalized reward for the matching process. We empirically set the reward scale within the range of \\([-3, 3]\\), with more analysis and ablations of reward scale presented in Section 5." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.661, + 0.487, + 0.692 + ], + "angle": 0, + "content": "The final reward value \\(\\mathcal{R}_{\\mathrm{final}}\\) is finally derived as the sum of \\(\\mathcal{R}_{\\mathrm{format}}\\) and \\(\\mathcal{R}_{\\mathrm{correct}}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.165, + 0.702, + 0.436, + 0.72 + ], + "angle": 0, + "content": "\\[\n\\mathcal {R} _ {\\text {f i n a l}} = \\mathcal {R} _ {\\text {f o r m a t}} + \\mathcal {R} _ {\\text {c o r r e c t}} \\in [ - 3, 4 ]\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.729, + 0.489, + 0.889 + ], + "angle": 0, + "content": "Unlike prior works that often rely on binary or overly simplified reward signals, our design captures the nuanced structure of tool calls by evaluating multiple interdependent components including tool names, parameter schemas, and parameter values. This fine-grained formulation better reflects the complexity of real-world tool use, where correctness cannot be reduced to a single binary criterion. We further validate the impact of this design through comprehensive analysis in Section 5." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.89, + 0.489, + 0.922 + ], + "angle": 0, + "content": "Overall, our reward design ensures a balanced and interpretable evaluation signal by explicitly" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.597, + 0.885, + 0.707 + ], + "angle": 0, + "content": "separating structural compliance from semantic correctness. By aligning rewards with both format adherence and fine-grained tool call accuracy, the model is guided to produce outputs that are not only syntactically valid but also semantically faithful, which is crucial for downstream tool execution and final task success." 
+ }, + { + "type": "title", + "bbox": [ + 0.509, + 0.73, + 0.753, + 0.746 + ], + "angle": 0, + "content": "3.4 RL Training with GRPO" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.758, + 0.885, + 0.854 + ], + "angle": 0, + "content": "To tune the model with structured rewards, we employ GRPO, a variant of PPO that introduces advantage normalization within grouped samples. This normalization helps stabilize training by reducing variance across samples that share a common input context. Let \\(\\pi_{\\theta}\\) represent the current policy." + }, + { + "type": "title", + "bbox": [ + 0.508, + 0.874, + 0.885, + 0.89 + ], + "angle": 0, + "content": "Normalized Advantage Across Query Groups." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.891, + 0.884, + 0.923 + ], + "angle": 0, + "content": "For each query \\(Q\\), its responses derived from the rollout form a group \\(G_{Q}\\) consisting of multiple" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.114, + 0.085, + 0.482, + 0.101 + ], + "angle": 0, + "content": "responses and their corresponding reward values:" + }, + { + "type": "equation", + "bbox": [ + 0.143, + 0.116, + 0.457, + 0.133 + ], + "angle": 0, + "content": "\\[\nG _ {Q} = \\left\\{A, \\left(s _ {1}, r _ {1}\\right), \\left(s _ {2}, r _ {2}\\right), \\dots , \\left(s _ {n}, r _ {n}\\right) \\right\\}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.148, + 0.49, + 0.243 + ], + "angle": 0, + "content": "where \\(A\\) denotes the ground-truth annotation for \\(Q\\), and each reward \\(r_i\\) is computed as the sum of the format and correctness rewards associated with response \\(s_i\\), i.e., \\(r_i = \\mathcal{R}_{\\mathrm{format}}(s_i, A) + \\mathcal{R}_{\\mathrm{correct}}(s_i, A)\\). For each group, we calculate the mean and standard deviation of the rewards:" + }, + { + "type": "equation", + "bbox": [ + 0.157, + 0.268, + 0.445, + 0.309 + ], + "angle": 0, + "content": "\\[\n\\mu_ {Q} = \\frac {1}{n} \\sum_ {i = 1} ^ {n} r _ {i}, \\quad \\sigma_ {Q} = \\sqrt {\\frac {1}{n} \\sum_ {i = 1} ^ {n} (r _ {i} - \\mu_ {Q}) ^ {2}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.322, + 0.487, + 0.355 + ], + "angle": 0, + "content": "Then, for each sample \\( s_i \\) in the group, we define the normalized advantage:" + }, + { + "type": "equation", + "bbox": [ + 0.232, + 0.364, + 0.371, + 0.391 + ], + "angle": 0, + "content": "\\[\nA _ {i} (s _ {i} | Q) = \\frac {r _ {i} - \\mu_ {Q}}{\\sigma_ {Q} + \\eta}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.118, + 0.398, + 0.469, + 0.413 + ], + "angle": 0, + "content": "where \\(\\eta\\) is a constant to avoid division by zero." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.425, + 0.49, + 0.489 + ], + "angle": 0, + "content": "Policy Optimization Objective. The policy \\(\\pi_{\\theta}\\) is optimized using the standard clipped PPO objective, adapted with our group-wise normalized advantages:" + }, + { + "type": "equation", + "bbox": [ + 0.128, + 0.512, + 0.473, + 0.569 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} J _ {\\mathrm {G R P O}} (\\theta) = \\mathbb {E} _ {Q \\sim \\mathcal {D}} \\mathbb {E} _ {s _ {i} \\sim \\pi_ {\\theta}} \\left[ \\min \\left(\\frac {\\pi_ {\\theta} (s _ {i} | Q)}{\\pi_ {\\mathrm {o l d}} (s _ {i} | Q)} A _ {i} (s _ {i} | Q), \\right. \\right. \\\\ \\left. 
\\operatorname{clip} \left(\frac{\pi_{\theta}(s_{i}|Q)}{\pi_{\mathrm{old}}(s_{i}|Q)}, 1 - \epsilon, 1 + \epsilon\right) A_{i}(s_{i}|Q)\right) \Bigg] \\ \end{array}\n\]"
  },
  {
    "type": "text",
    "bbox": [0.113, 0.582, 0.489, 0.711],
    "angle": 0,
    "content": "Unlike the original GRPO formulations, we omit the KL penalty term against a reference model. This design choice encourages the model to more freely adapt its behavior to our custom response format and structured reward signals. In practice, we observe that this leads to faster convergence and comparable performance, while also simplifying the training pipeline."
  },
  {
    "type": "text",
    "bbox": [0.113, 0.712, 0.489, 0.809],
    "angle": 0,
    "content": "Overall, this objective guides the policy to generate structurally consistent and semantically accurate tool calls, while group-wise normalization mitigates reward variance across queries, leading to more stable and sample-efficient alignment with task-specific response requirements."
  },
  {
    "type": "title",
    "bbox": [0.114, 0.823, 0.262, 0.839],
    "angle": 0,
    "content": "4 Experiments"
  },
  {
    "type": "title",
    "bbox": [0.114, 0.852, 0.294, 0.867],
    "angle": 0,
    "content": "4.1 Training Dataset"
  },
  {
    "type": "text",
    "bbox": [0.113, 0.874, 0.489, 0.921],
    "angle": 0,
    "content": "To support robust tool learning through RL, we construct a mixed dataset spanning diverse tool use scenarios:"
  },
  {
    "type": "text",
    "bbox": [0.512, 0.085, 0.883, 0.149],
    "angle": 0,
    "content": "- ToolACE (Liu et al., 2024): A general tool use dataset where the model learns when to invoke tools versus respond directly, improving decision-making in multi-step interactions."
  },
  {
    "type": "text",
    "bbox": [0.512, 0.152, 0.884, 0.246],
    "angle": 0,
    "content": "- Hammer (Masked) (Lin et al., 2024): A subset of Hammer with randomized tool and parameter names, forcing the model to rely on descriptions rather than memorized labels, thus enhancing generalization and reducing overfitting to certain tools."
  },
  {
    "type": "text",
    "bbox": [0.512, 0.251, 0.882, 0.331],
    "angle": 0,
    "content": "- xLAM (Zhang et al., 2024): A compositional dataset requiring one or multiple tool calls per turn, encouraging the model to reason about tool dependencies and actively plan diverse tool-calling actions."
  },
  {
    "type": "list",
    "bbox": [0.512, 0.085, 0.884, 0.331],
    "angle": 0,
    "content": null
  },
  {
    "type": "text",
    "bbox": [0.508, 0.335, 0.884, 0.512],
    "angle": 0,
    "content": "For RL training, we sample 2K examples from ToolACE and 1K each from Hammer and xLAM, creating a balanced dataset spanning diverse levels of complexity and tool use. Multi-step trajectories are decomposed into single-step instances, with prior dialogue history injected into the user prompt (as shown in Figure 12) to preserve context. This setup encourages strategic exploration and teaches the model to select and apply tools appropriately within each step. Please see Appendix B for more details and justifications."
  },
  {
    "type": "title",
    "bbox": [0.509, 0.522, 0.719, 0.537],
    "angle": 0,
    "content": "4.2 Experiment Settings"
  },
  {
    "type": "text",
    "bbox": [0.508, 0.542, 0.884, 0.751],
    "angle": 0,
    "content": "Training. 
We conduct all RL experiments using the veRL framework (Sheng et al., 2024), adopting the GRPO algorithm detailed in the previous section. For each training step, we sample a batch of 512 queries and generate 4 responses per query, training for 15 epochs in total (see Appendix B for full configuration details). To encourage broader policy exploration, we remove KL regularization and apply a generation temperature of 1.0. We initialize our models with the Qwen-2.5-Instruct (Team, 2024) and Llama-3.2-Instruct (Dubey et al., 2024) series, which are further tuned under the GRPO objective with our customized reward design."
  },
  {
    "type": "text",
    "bbox": [0.508, 0.759, 0.885, 0.888],
    "angle": 0,
    "content": "Evaluation. We evaluate our approach on the Berkeley Function Call Leaderboard (BFCL) (Patil et al., 2024), a comprehensive benchmark that spans a diverse set of challenges, including single-step reasoning, multi-step tool use, real-time execution, irrelevant tool rejection, simultaneous multi-tool selection, and multi-tool application\\(^{2}\\). In addition, we present results on API-"
  },
  {
    "type": "page_footnote",
    "bbox": [0.509, 0.895, 0.851, 0.92],
    "angle": 0,
    "content": "\\(^{2}\\)https://gorilla.cs.berkeley.edu/blogs/13_bfcl_v3-multi_turn.html"
  }
],
[
  {
    "type": "text",
    "bbox": [0.113, 0.085, 0.49, 0.295],
    "angle": 0,
    "content": "Bank (Li et al., 2023), a three-level evaluation framework comprising 73 diverse and complex API tools. It assesses an LLM's ability to select and apply tools through natural multi-turn dialogues, across three levels of difficulty. We also evaluate on a representative QA benchmark, Bamboogle (Press et al., 2022), which comprises a variety of question-answering tasks where performance is measured by final answer accuracy rather than the correctness of tool use. This broad coverage makes our evaluation setting effective for assessing real-world LLM tool use proficiency. All results are reported in terms of accuracy."
  },
  {
    "type": "text",
    "bbox": [0.113, 0.303, 0.49, 0.658],
    "angle": 0,
    "content": "Baselines. We compare our approach against several baselines to better isolate the effects of GRPO training: (1) Raw Instruct Model: the original model without any additional fine-tuning or RL, evaluated using the same prompts. (2) SFT on RL Data: the instruct model fine-tuned using the same 4K / selected 400 data points as the RL training set, providing a comparison point to assess whether GRPO training outperforms standard SFT. (3) GRPO on SFT Model: GRPO is applied to a model that has already undergone SFT on the selected 400 data points. This setup allows us to evaluate the impact of initializing GRPO with a format-aware model, in contrast to starting from the raw instruct model in a cold start manner. (4) PPO: We also include the standard PPO setting as a baseline to evaluate whether our reward design is effective beyond GRPO. We report results for both a cold start PPO model and a PPO model initialized with SFT, using the same hyperparameters as in the GRPO setup for a fair comparison. Please refer to Appendix B for more details and justifications."
  },
  {
    "type": "title",
    "bbox": [0.114, 0.667, 0.22, 0.682],
    "angle": 0,
    "content": "4.3 Results"
  },
  {
    "type": "text",
    "bbox": [0.113, 0.688, 0.49, 0.864],
    "angle": 0,
    "content": "Main Results. We report BFCL and API-Bank results in Table 1 and Table 2, respectively. 
Our GRPO method, trained from scratch on the Qwen2.5-Instruct series, generally outperforms the other baselines, achieving \\(\\sim 10\\%\\) absolute gains over SFT trained on the same data volume. In contrast, LLaMA-3.2-Instruct shows less improvement, possibly due to the model's lower adaptability to GRPO-style generalization. Nevertheless, it remains competitive and outperforms most baselines on API-Bank."
  },
  {
    "type": "text",
    "bbox": [0.113, 0.874, 0.49, 0.922],
    "angle": 0,
    "content": "SFT Initialization Impacts. Interestingly, GRPO also improves models initialized with limited SFT, often outperforming full-scale SFT"
  },
  {
    "type": "image",
    "bbox": [0.518, 0.089, 0.691, 0.179],
    "angle": 0,
    "content": null
  },
  {
    "type": "image_caption",
    "bbox": [0.545, 0.186, 0.664, 0.198],
    "angle": 0,
    "content": "(a) Format Reward"
  },
  {
    "type": "image",
    "bbox": [0.703, 0.089, 0.876, 0.179],
    "angle": 0,
    "content": null
  },
  {
    "type": "image_caption",
    "bbox": [0.716, 0.186, 0.863, 0.198],
    "angle": 0,
    "content": "(b) Correctness Reward"
  },
  {
    "type": "image_caption",
    "bbox": [0.508, 0.214, 0.883, 0.258],
    "angle": 0,
    "content": "Figure 5: Format (left) and correctness (right) reward trends across training steps for Qwen2.5-3B-Instruct with different model initialization strategies."
  },
  {
    "type": "image",
    "bbox": [0.517, 0.279, 0.691, 0.369],
    "angle": 0,
    "content": null
  },
  {
    "type": "image_caption",
    "bbox": [0.545, 0.376, 0.663, 0.388],
    "angle": 0,
    "content": "(a) Format Reward"
  },
  {
    "type": "image",
    "bbox": [0.703, 0.279, 0.876, 0.369],
    "angle": 0,
    "content": null
  },
  {
    "type": "image_caption",
    "bbox": [0.716, 0.376, 0.863, 0.388],
    "angle": 0,
    "content": "(b) Correctness Reward"
  },
  {
    "type": "image_caption",
    "bbox": [0.508, 0.404, 0.883, 0.448],
    "angle": 0,
    "content": "Figure 6: Format (left) and correctness (right) reward trends across training steps for Qwen2.5-3B-Instruct with different RL strategies (GRPO vs. PPO)."
  },
  {
    "type": "text",
    "bbox": [0.507, 0.475, 0.885, 0.668],
    "angle": 0,
    "content": "trained on 10 times more data. However, this setup still underperforms compared to cold start GRPO. We hypothesize that SFT initialization leads to memorization and overfitting, which reduces GRPO's effectiveness in generalization. As shown in Figure 5, SFT-initialized models achieve higher training rewards due to distributional alignment between SFT and RL data, but empirically generalize worse on the two benchmarks. This further highlights that higher training rewards do not necessarily translate to better generalization."
  },
  {
    "type": "text",
    "bbox": [0.508, 0.68, 0.885, 0.922],
    "angle": 0,
    "content": "Reward Design on PPO. We also evaluate PPO under both cold start and SFT-initialized settings to examine the effectiveness of our reward design. The results show that while PPO with a cold start can outperform SFT in some cases, it tends to be less stable across different model settings. In contrast, GRPO consistently achieves higher rewards even from a cold start, suggesting that our reward design is partially effective for PPO but works best in the GRPO framework. 
As shown in Figure 6, GRPO not only achieves higher correctness rewards but also gains format rewards more rapidly during training. Interestingly, PPO benefits from SFT initialization, generally yielding better results than a cold start, whereas GRPO performs better" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.12, + 0.082, + 0.88, + 0.355 + ], + "angle": 0, + "content": "
ModelOverall AccNon-Live AST AccNon-Live Exec AccLive AccMulti Turn AccRelevance DetectionIrrelevance Detection
Qwen2.5-1.5B-Instruct (Raw)19.41%16.00%13.18%35.58%0.00%44.44%82.49%
Qwen2.5-1.5B-Instruct (SFT400)40.21%65.12%61.11%56.69%1.00%94.44%60.14%
Qwen2.5-1.5B-Instruct (SFT4k)40.67%59.94%59.84%59.31%1.00%88.89%71.34%
Qwen2.5-1.5B-Instruct (SFT400+PPO)42.95%77.65%69.75%55.73%1.88%100.00%48.40%
Qwen2.5-1.5B-Instruct (SFT400+GRPO)40.93%70.54%60.79%56.33%1.00%94.44%58.63%
Qwen2.5-1.5B-Instruct (PPO Cold Start)38.32%79.40%70.11%45.24%0.87%100.00%18.09%
Qwen2.5-1.5B-Instruct (Ours, GRPO Cold Start)46.20%77.96%76.98%60.73%2.25%100.00%56.44%
Qwen2.5-3B-Instruct (Raw)33.04%42.52%40.80%53.96%1.00%64.71%56.01%
Qwen2.5-3B-Instruct (SFT400)34.08%69.29%61.50%41.40%0.00%94.44%8.11%
Qwen2.5-3B-Instruct (SFT4k)41.97%62.85%54.73%59.17%0.75%77.78%75.12%
Qwen2.5-3B-Instruct (SFT400+PPO)45.80%78.29%71.09%58.76%5.12%94.12%54.70%
Qwen2.5-3B-Instruct (SFT400+GRPO)46.42%76.21%68.93%64.15%1.75%88.89%71.76%
Qwen2.5-3B-Instruct (PPO Cold Start)51.15%82.42%78.52%67.78%4.88%94.12%73.87%
Qwen2.5-3B-Instruct (Ours, GRPO Cold Start)52.98%81.58%79.43%73.78%3.75%88.24%84.85%
Qwen2.5-7B-Instruct (Raw)41.97%66.02%70.11%53.51%4.25%76.47%62.66%
Qwen2.5-7B-Instruct (SFT400)34.08%69.29%66.68%41.4%0.00%94.44%8.11%
Qwen2.5-7B-Instruct (SFT4k)36.53%45.15%53.5%57.13%0.75%72.22%72.32%
Qwen2.5-7B-Instruct (SFT400+PPO)42.02%83.90%72.62%51.84%0.25%100.00%29.66%
Qwen2.5-7B-Instruct (SFT400+GRPO)39.25%80.69%74.34%46.51%0.25%100.00%14.19%
Qwen2.5-7B-Instruct (PPO Cold Start)46.68%79.33%78.16%63.17%0.38%88.89%52.92%
Qwen2.5-7B-Instruct (Ours, GRPO Cold Start)58.38%86.17%78.25%74.9%18.12%83.33%76.68%
Llama-3.2-3B-Instruct (Raw)22.09%17.44%14.57%43.85%0.00%77.78%66.07%
Llama-3.2-3B-Instruct (SFT400)41.22%64.27%62.18%58.37%0.75%66.67%71.12%
Llama-3.2-3B-Instruct (SFT4k)44.16%65.42%67.02%63.04%1.38%77.78%78.25%
Llama-3.2-3B-Instruct (SFT400+PPO)41.62%68.10%69.88%52.98%3.00%94.12%56.29%
Llama-3.2-3B-Instruct (SFT400+GRPO)42.54%65.15%68.98%59.40%0.88%72.22%65.80%
Llama-3.2-3B-Instruct (PPO Cold Start)42.98%84.00%72.00%52.80%2.88%100.00%31.94%
Llama-3.2-3B-Instruct (Ours, GRPO Cold Start)44.10%74.38%75.18%56.86%1.37%94.44%62.23%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.316, + 0.365, + 0.68, + 0.379 + ], + "angle": 0, + "content": "Table 1: BFCL V3 Benchmark Results (Main Result)" + }, + { + "type": "table", + "bbox": [ + 0.12, + 0.392, + 0.506, + 0.655 + ], + "angle": 0, + "content": "
ModelOverall AccLevel 1Level 2Level 3
Qwen2.5-1.5B-Instruct (Raw)30.65%28.32%35.82%35.11%
Qwen2.5-1.5B-Instruct (SFT400)53.60%57.14%50.75%44.27%
Qwen2.5-1.5B-Instruct (SFT4k)47.07%52.88%52.24%26.72%
Qwen2.5-1.5B-Instruct (SFT400+PPO)57.12%60.9%50.75%48.85%
Qwen2.5-1.5B-Instruct (SFT400+GRPO)61.31%64.16%58.21%54.20%
Qwen2.5-1.5B-Instruct (PPO Cold Start)40.54%44.61%31.34%32.82%
Qwen2.5-1.5B-Instruct (Ours, GRPO Cold Start)63.15%70.68%61.19%41.22%
Qwen2.5-3B-Instruct (Raw)51.59%59.65%32.84%36.64%
Qwen2.5-3B-Instruct (SFT400)52.76%59.65%50.75%32.82%
Qwen2.5-3B-Instruct (SFT4k)50.92%55.64%43.28%40.46%
Qwen2.5-3B-Instruct (SFT400+PPO)65.16%67.92%55.22%61.83%
Qwen2.5-3B-Instruct (SFT400+GRPO)62.48%68.67%58.21%45.80%
Qwen2.5-3B-Instruct (PPO Cold Start)57.62%64.66%59.70%35.11%
Qwen2.5-3B-Instruct (Ours, GRPO Cold Start)67.00%73.43%67.16%47.33%
Qwen2.5-7B-Instruct (Raw)62.48%70.68%49.25%44.27%
Qwen2.5-7B-Instruct (SFT400)50.59%55.89%50.75%34.35%
Qwen2.5-7B-Instruct (SFT4k)47.07%51.13%34.33%41.22%
Qwen2.5-7B-Instruct (SFT400+PPO)63.15%72.43%58.21%37.40%
Qwen2.5-7B-Instruct (SFT400+GRPO)54.10%61.40%52.24%32.82%
Qwen2.5-7B-Instruct (PPO Cold Start)61.64%68.67%44.78%48.85%
Qwen2.5-7B-Instruct (Ours, GRPO Cold Start)64.66%73.93%61.19%38.17%
Llama-3.2-3B-Instruct (Raw)40.54%44.86%29.85%32.82%
Llama-3.2-3B-Instruct (SFT400)52.76%60.65%35.82%37.40%
Llama-3.2-3B-Instruct (SFT4k)43.89%53.88%29.85%20.61%
Llama-3.2-3B-Instruct (SFT400+PPO)57.79%63.16%47.76%46.56%
Llama-3.2-3B-Instruct (SFT400+GRPO)56.78%63.60%41.79%43.51%
Llama-3.2-3B-Instruct (PPO Cold Start)55.78%60.65%41.79%48.09%
Llama-3.2-3B-Instruct (Ours, GRPO Cold Start)59.13%65.66%52.24%42.75%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.154, + 0.666, + 0.471, + 0.68 + ], + "angle": 0, + "content": "Table 2: API-Bank Test Results (Main Result)" + }, + { + "type": "table", + "bbox": [ + 0.536, + 0.392, + 0.88, + 0.656 + ], + "angle": 0, + "content": "
ModelAccuracyAvg Num Tool Call
Qwen2.5-1.5B-Instruct (Raw)20.8%0.61
Qwen2.5-1.5B-Instruct (SFT400)24.8%0.78
Qwen2.5-1.5B-Instruct (SFT4k)23.2%1.25
Qwen2.5-1.5B-Instruct (SFT400+PPO)36.8%1.06
Qwen2.5-1.5B-Instruct (SFT400+GRPO)38.4%0.96
Qwen2.5-1.5B-Instruct (PPO Cold Start)23.2%2.38
Qwen2.5-1.5B-Instruct (Ours, GRPO Cold Start)44.0%1.19
Qwen2.5-3B-Instruct (Raw)52.0%1.77
Qwen2.5-3B-Instruct (SFT400)54.4%0.86
Qwen2.5-3B-Instruct (SFT4k)49.6%0.92
Qwen2.5-3B-Instruct (SFT400+PPO)43.2%1.04
Qwen2.5-3B-Instruct (SFT400+GRPO)56.8%0.99
Qwen2.5-3B-Instruct (PPO Cold Start)40.0%1.14
Qwen2.5-3B-Instruct (Ours, GRPO Cold Start)60.0%1.32
Qwen2.5-7B-Instruct (Raw)69.6%1.42
Qwen2.5-7B-Instruct (SFT400)28.8%3.71
Qwen2.5-7B-Instruct (SFT4k)30.4%1.06
Qwen2.5-7B-Instruct (SFT400+PPO)45.6%3.54
Qwen2.5-7B-Instruct (SFT400+GRPO)29.6%3.70
Qwen2.5-7B-Instruct (PPO Cold Start)48.0%1.25
Qwen2.5-7B-Instruct (Ours, GRPO Cold Start)72.0%1.63
Llama-3.2-3B-Instruct (Raw)34.4%1.25
Llama-3.2-3B-Instruct (SFT400)44.0%0.98
Llama-3.2-3B-Instruct (SFT4k)48.8%0.98
Llama-3.2-3B-Instruct (SFT400+PPO)39.2%1.33
Llama-3.2-3B-Instruct (SFT400+GRPO)45.6%1.00
Llama-3.2-3B-Instruct (PPO Cold Start)29.6%1.42
Llama-3.2-3B-Instruct (Ours, GRPO Cold Start)52.0%0.89
" + }, + { + "type": "table_caption", + "bbox": [ + 0.545, + 0.666, + 0.87, + 0.68 + ], + "angle": 0, + "content": "Table 3: Bamboogle Test Results (Main Result)" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.706, + 0.49, + 0.785 + ], + "angle": 0, + "content": "when trained from scratch. These findings highlight that while PPO can benefit from our reward design, its impact is more limited compared to the more robust and consistent improvements observed with GRPO." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.809, + 0.491, + 0.922 + ], + "angle": 0, + "content": "Generalization Studies. We evaluate the generalization ability of our trained model in two challenging settings: unfamiliar scenarios and novel task goals (both from BFCL benchmark subset). Specifically, we test the model's performance in tool usage within unseen programming languages and its ability to detect irrelevant tools, neither of" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.721, + 0.691, + 0.813 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.53, + 0.82, + 0.679, + 0.832 + ], + "angle": 0, + "content": "(a) Unfamiliar Scenario" + }, + { + "type": "image", + "bbox": [ + 0.702, + 0.722, + 0.876, + 0.813 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.727, + 0.819, + 0.852, + 0.832 + ], + "angle": 0, + "content": "(b) Unfamiliar Goal" + }, + { + "type": "image_caption", + "bbox": [ + 0.509, + 0.848, + 0.883, + 0.905 + ], + "angle": 0, + "content": "Figure 7: Qwen2.5-3B-Instruct's performance across unfamiliar programming language scenarios (left) and novel relevance detection task goals (right), evaluated under different training settings." + } + ], + [ + { + "type": "text", + "bbox": [ + 0.129, + 0.087, + 0.448, + 0.104 + ], + "angle": 0, + "content": "User: I would like to buy a movie ticket in San Jose at 11 o'clock in the night. Tools Available:" + }, + { + "type": "text", + "bbox": [ + 0.131, + 0.105, + 0.45, + 0.113 + ], + "angle": 0, + "content": "1. Movies_1_BuyMovieTickets/movie_name, number_ofTickets, ...)" + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.113, + 0.366, + 0.121 + ], + "angle": 0, + "content": "\\(\\rightarrow\\) Buy tickets for a movie at a specific time and location." + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.121, + 0.319, + 0.128 + ], + "angle": 0, + "content": "2. Movies 1 FindMovies location." + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.129, + 0.326, + 0.137 + ], + "angle": 0, + "content": "\\(\\rightarrow\\) Search movies by location, genre, or format." + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.138, + 0.409, + 0.146 + ], + "angle": 0, + "content": "3. Movies_1_GetTimesForMovie/movie_name, location, ...)" + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.147, + 0.382, + 0.154 + ], + "angle": 0, + "content": "\\(\\rightarrow\\) Get showtimes for a movie on a specific date and location." + }, + { + "type": "list", + "bbox": [ + 0.131, + 0.105, + 0.45, + 0.154 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.16, + 0.461, + 0.168 + ], + "angle": 0, + "content": " The user wants to buy a movie ticket in San Jose at 11 PM. The available" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.169, + 0.486, + 0.211 + ], + "angle": 0, + "content": "tool, Movies_1_BuyMovieTickets, allows us to purchase movie tickets with the required details such as movie name, number of tickets, show date, location, show time, and show type. 
We need to find the movie name and the show date. However, the user did not specify the movie name or the show date. We can infer the show date from the current date, but we need to know the movie name to proceed with purchase. " + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.219, + 0.474, + 0.227 + ], + "angle": 0, + "content": " To assist in purchasing a movie ticket, I need the name of the movie and" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.228, + 0.48, + 0.236 + ], + "angle": 0, + "content": "the specific date of the show. Could you please provide this information? " + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.25, + 0.443, + 0.258 + ], + "angle": 0, + "content": "User: What is the slope of the line which is perpendicular to the line with the" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.259, + 0.218, + 0.267 + ], + "angle": 0, + "content": "equation \\( y = 3x + 2 \\)?" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.267, + 0.198, + 0.275 + ], + "angle": 0, + "content": "Tools Available:" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.276, + 0.439, + 0.284 + ], + "angle": 0, + "content": "1. findcritical_points(function, variable, range=[0.0, 3.4])" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.285, + 0.427, + 0.301 + ], + "angle": 0, + "content": "\\(\\rightarrow\\) Find critical points of a function with respect to a variable, optionally within a range." + }, + { + "type": "list", + "bbox": [ + 0.13, + 0.276, + 0.439, + 0.301 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.307, + 0.488, + 0.357 + ], + "angle": 0, + "content": " The problem is asking for the slope of a line that is perpendicular to the line with the equation \\( y = 3x + 2 \\). The slope of the given line is 3. The slope of a line that is perpendicular to another line is the negative reciprocal of the original line's slope. So, the slope of the line perpendicular to \\( y = 3x + 2 \\) would be -1/3. The given tool find_critical_points is not relevant to finding the slope of a line and cannot be used to solve this problem." + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.365, + 0.428, + 0.373 + ], + "angle": 0, + "content": " The slope of the line that is perpendicular to the line with the" + }, + { + "type": "text", + "bbox": [ + 0.13, + 0.373, + 0.295, + 0.382 + ], + "angle": 0, + "content": "equation \\( y = 3x + 2 \\) is -1/3. " + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.396, + 0.487, + 0.426 + ], + "angle": 0, + "content": "Table 4: Cases of Qwen2.5-3B-Instruct (GRPO Cold Start) generalizing to irrelevant tool detection goals." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.455, + 0.489, + 0.648 + ], + "angle": 0, + "content": "which were explicitly included during RL training or in the dataset. As shown in Figure 7, Qwen2.5-3B-Instruct, when trained from scratch with our GRPO-based reward design, consistently achieves highest performance. Additionally, Section 4.3 presents two qualitative examples where the model proactively rejects inappropriate tool use—first by clarifying ambiguous intent, and second by opting to answer directly without tools. These behaviors reflect emergent proactivity and metacognition, enhancing efficiency, reducing hallucinations, and signaling foundational agentic intelligence." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.662, + 0.489, + 0.822 + ], + "angle": 0, + "content": "Free-form Inference Effectiveness. 
While our model is trained with a focus on tool call format and correctness, we further evaluate its ability to handle free-form tool use in a QA setting. Unlike the structured tool selection and application tasks, the QA setting (1) imposes no constraints on tool call parameters, and (2) evaluates only the final answer, making it a \"goal-oriented\" rather than a \"process-oriented\" task. This naturally introduces a multi-step interaction scenario."
  },
  {
    "type": "text",
    "bbox": [0.113, 0.825, 0.489, 0.922],
    "angle": 0,
    "content": "Specifically, we use Bamboogle, a multi-hop QA dataset, to assess this capability. The model is equipped with a web search tool, and we report both the answer accuracy and the number of tool calls for all baselines and our approach. As shown in Table 3, our reward design achieves the highest"
  },
  {
    "type": "image",
    "bbox": [0.518, 0.09, 0.691, 0.179],
    "angle": 0,
    "content": null
  },
  {
    "type": "image_caption",
    "bbox": [0.539, 0.187, 0.669, 0.2],
    "angle": 0,
    "content": "(a) Response Length"
  },
  {
    "type": "image",
    "bbox": [0.702, 0.089, 0.876, 0.179],
    "angle": 0,
    "content": null
  },
  {
    "type": "image_caption",
    "bbox": [0.73, 0.187, 0.849, 0.199],
    "angle": 0,
    "content": "(b) Length Reward"
  },
  {
    "type": "image_caption",
    "bbox": [0.509, 0.215, 0.883, 0.244],
    "angle": 0,
    "content": "Figure 8: Response length (left) and its reward (right) trends across training steps for different models."
  },
  {
    "type": "text",
    "bbox": [0.508, 0.269, 0.884, 0.381],
    "angle": 0,
    "content": "performance, despite this setting not being explicitly seen during training. Notably, our cold start GRPO model surpasses others in accuracy without relying on an excessive number of tool calls. This suggests that the model can flexibly invoke tools when needed, effectively leverage feedback, and navigate efficiently toward the correct answer."
  },
  {
    "type": "title",
    "bbox": [0.509, 0.393, 0.619, 0.409],
    "angle": 0,
    "content": "5 Analysis"
  },
  {
    "type": "text",
    "bbox": [0.508, 0.418, 0.884, 0.498],
    "angle": 0,
    "content": "In this section, we conduct a series of ablation studies to identify the most effective reward design for tool calling. We explore various factors including reward type, scale, granularity, and temporal dynamics."
  },
  {
    "type": "title",
    "bbox": [0.509, 0.509, 0.75, 0.525],
    "angle": 0,
    "content": "5.1 Effect of Length Reward"
  },
  {
    "type": "text",
    "bbox": [0.508, 0.53, 0.884, 0.626],
    "angle": 0,
    "content": "We first examine the role of a length-based reward. Prior work has demonstrated that R1-like models can promote deeper reasoning, often reflected in longer thinking traces. To encourage this behavior, we introduce a reward term proportional to the length of the <think> field:"
  },
  {
    "type": "equation",
    "bbox": [0.593, 0.636, 0.796, 0.671],
    "angle": 0,
    "content": "\\[\n\\mathcal{R}_{\\text{length}} = \\min \\left(\\frac{L_{\\text{think}}}{L_{\\text{target}}}, 1\\right)\n\\]"
  },
  {
    "type": "text",
    "bbox": [0.508, 0.68, 0.884, 0.825],
    "angle": 0,
    "content": "where \\( L_{\\text{think}} \\) denotes the length of the thinking segment in the model's output, and \\( L_{\\text{target}} \\) denotes the target output length, which we empirically set to 512. 
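" },
  {
    "type": "text",
    "content": "As a concrete illustration, a minimal sketch of this term follows (our own code, not the paper's; the length is approximated by a whitespace token split, since the exact length unit is not pinned down here):"
  },
  {
    "type": "text",
    "content": "def length_reward(think_text: str, target_len: int = 512) -> float:
    # R_length = min(L_think / L_target, 1): linear credit up to the target length.
    l_think = len(think_text.split())  # crude length proxy; an assumption on our part
    return min(l_think / target_len, 1.0)"
  },
  {
    "type": "text",
    "content": "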
We found that the raw model rarely generates responses longer than half this length, making 512 a reasonable and effective target for encouraging longer outputs. This length-based component is added to the overall reward, which now consists of format, correctness, and reasoning length." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.826, + 0.884, + 0.922 + ], + "angle": 0, + "content": "As shown in Figure 8, both response length and the length reward generally increase throughout training, particularly for the Qwen model series. This indicates that the length reward effectively encourages longer reasoning. However, the downstream results in Table 5 reveal that adding a length" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.12, + 0.082, + 0.881, + 0.191 + ], + "angle": 0, + "content": "
ModelOverall AccNon-Live AST AccNon-Live Exec AccLive AccMulti Turn AccRelevance DetectionIrrelevance Detection
Qwen2.5-1.5B-Instruct (Original)46.20%77.96%76.98%60.73%2.25%100.00%56.44%
Qwen2.5-1.5B-Instruct (w/ Length Reward)33.23%70.58%71.36%35.63%0.50%94.44%4.52%
Qwen2.5-1.5B-Instruct (Dynamic)28.51%53.23%48.23%38.07%0.00%55.56%25.08%
Qwen2.5-3B-Instruct (Original)52.98%81.58%79.43%73.78%3.75%88.24%84.85%
Qwen2.5-3B-Instruct (w/ Length reward)48.89%77.83%78.61%63.56%4.50%88.24%71.22%
Qwen2.5-3B-Instruct (Dynamic)48.24%77.60%79.11%63.22%3.00%88.89%68.53%
Llama-3.2-3B-Instruct (Original)44.10%74.38%75.18%56.86%1.37%94.44%62.23%
Llama-3.2-3B-Instruct (w/ Length reward)44.98%78.02%77.54%56.55%1.25%100.00%63.76%
Llama-3.2-3B-Instruct (Dynamic)43.15%75.50%71.64%56.06%1.00%100.00%57.82%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.334, + 0.199, + 0.663, + 0.214 + ], + "angle": 0, + "content": "Table 5: BFCL V3 Benchmark Results (Length)" + }, + { + "type": "image", + "bbox": [ + 0.122, + 0.243, + 0.296, + 0.336 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.143, + 0.341, + 0.274, + 0.355 + ], + "angle": 0, + "content": "(a) Response Length" + }, + { + "type": "image", + "bbox": [ + 0.306, + 0.243, + 0.482, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.334, + 0.341, + 0.454, + 0.355 + ], + "angle": 0, + "content": "(b) Length Reward" + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.37, + 0.489, + 0.414 + ], + "angle": 0, + "content": "Figure 9: Response length (left) and its reward (right) trends across training steps within the dynamic length reward training setting." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.438, + 0.489, + 0.567 + ], + "angle": 0, + "content": "reward does not consistently improve task performance, and in smaller-scale models, it can even cause substantial degradation. These observations suggest that while extended reasoning may appear desirable, it is not always beneficial for tool use tasks. In fact, excessive length may introduce unnecessary complexity, leading to overthinking and reduced effectiveness." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.576, + 0.489, + 0.641 + ], + "angle": 0, + "content": "Dynamic Length Reward. Since fixed-length rewards showed minimal impact and converged quickly, we explored a dynamic length reward that adapts over training steps. Specifically, we define:" + }, + { + "type": "equation", + "bbox": [ + 0.157, + 0.651, + 0.443, + 0.687 + ], + "angle": 0, + "content": "\\[\n\\mathcal {R} _ {\\mathrm {d y n a m i c}} = \\min \\left(\\frac {L _ {\\mathrm {t h i n k}}}{L _ {\\mathrm {t a r g e t}} \\cdot (1 + p)}, 1\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.696, + 0.487, + 0.776 + ], + "angle": 0, + "content": "where \\(S\\) denotes the training steps and \\(p = \\frac{S_{\\mathrm{current}}}{S_{\\mathrm{total}}} \\in [0,1]\\) represents the normalized training progress. This formulation gradually increases the target thinking length over time, aligning with model maturity." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.777, + 0.489, + 0.889 + ], + "angle": 0, + "content": "As shown in fig. 9, this approach yields a steadier growth in thinking length, particularly for the Llama model. However, the performance results in Table 5 reveal that even scheduled rewards fail to improve performance. This further supports our hypothesis that extended reasoning may not benefit this task and can even have adverse effects." + }, + { + "type": "text", + "bbox": [ + 0.518, + 0.243, + 0.868, + 0.341 + ], + "angle": 0, + "content": "Takeaway 1: While length rewards encourage longer reasoning traces, they do not consistently improve task performance and may even harm it in smaller models, highlighting that longer reasoning is not inherently better for tool use tasks." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.373, + 0.736, + 0.388 + ], + "angle": 0, + "content": "5.2 Effect of Reward Scale" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.394, + 0.884, + 0.554 + ], + "angle": 0, + "content": "Next, we investigate the effect of reward scaling, specifically the relative weighting between correctness and format rewards. 
Prior work in R1-style RL commonly assigns a higher weight to the correctness reward than to the format reward (Xie et al., 2025; Jin et al., 2025), emphasizing the importance of learning the correct answer over superficial adherence to format. This strategy helps prevent reward hacking, where a model might exploit formatting heuristics without learning task semantics."
  },
  {
    "type": "text",
    "bbox": [0.508, 0.556, 0.884, 0.651],
    "angle": 0,
    "content": "To test the importance of this design choice, we conduct an ablation where we equalize the maximum correctness and format rewards by setting the former's range to \\([-1, 1]\\), matching that of the format reward. This adjustment only affects the final normalization step of the correctness reward:"
  },
  {
    "type": "equation",
    "bbox": [0.568, 0.661, 0.824, 0.695],
    "angle": 0,
    "content": "\\[\n\\mathcal{R}_{\\mathrm{correct}} = 2 \\cdot \\frac{R_{\\mathrm{max}}}{S_{\\mathrm{max}}} - 1 \\in [-1, 1]\n\\]"
  },
  {
    "type": "text",
    "bbox": [0.509, 0.704, 0.868, 0.718],
    "angle": 0,
    "content": "where all variables are defined as in Section 3.3."
  },
  {
    "type": "text",
    "bbox": [0.508, 0.72, 0.884, 0.88],
    "angle": 0,
    "content": "As shown in Table 6, this equal-scaling variant, denoted as \"Equal Max\", results in a slight drop in overall accuracy across most models, with the exception of Qwen2.5-3B, which maintains performance comparable to the original setting. These results underscore the importance of assigning greater weight to the correctness reward: doing so helps steer the model toward mastering the core reasoning and tool use capabilities necessary for robust generalization."
  },
  {
    "type": "text",
    "bbox": [0.509, 0.89, 0.884, 0.922],
    "angle": 0,
    "content": "Dynamic Reward Scaling. Building on the insight that the correctness reward plays a more critical"
  }
],
[
  {
    "type": "table",
    "bbox": [0.12, 0.083, 0.881, 0.224],
    "angle": 0,
    "content": "
ModelOverall AccNon-Live AST AccNon-Live Exec AccLive AccMulti Turn AccRelevance DetectionIrrelevance Detection
Qwen2.5-1.5B-Instruct (Original)46.20%77.96%76.98%60.73%2.25%100.00%56.44%
Qwen2.5-1.5B-Instruct (Equal max)39.47%78.56%75.50%45.45%2.50%100.00%16.44%
Qwen2.5-1.5B-Instruct (Two stage)38.85%77.96%76.23%44.51%2.25%100.00%10.61%
Qwen2.5-1.5B-Instruct (Dynamic)45.71%78.31%75.73%58.91%2.50%100.00%57.20%
Qwen2.5-3B-Instruct (Original)52.98%81.58%79.43%73.78%3.75%88.24%84.85%
Qwen2.5-3B-Instruct (Equal max)51.76%81.50%79.50%69.79%4.25%88.89%78.07%
Qwen2.5-3B-Instruct (Two stage)50.66%80.62%78.82%67.93%3.50%88.89%76.42%
Qwen2.5-3B-Instruct (Dynamic)53.81%81.44%80.75%75.43%3.62%77.78%88.82%
Llama-3.2-3B-Instruct (Original)44.10%74.38%75.18%56.86%1.37%94.44%62.23%
Llama-3.2-3B-Instruct (Equal max)42.47%67.77%75.05%55.75%1.00%88.89%59.56%
Llama-3.2-3B-Instruct (Two stage)41.33%65.54%72.70%55.22%0.75%88.89%57.59%
Llama-3.2-3B-Instruct (Dynamic)46.85%83.00%72.77%61.00%3.38%88.89%59.37%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.339, + 0.232, + 0.658, + 0.246 + ], + "angle": 0, + "content": "Table 6: BFCL V3 Benchmark Results (Scale)" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.273, + 0.489, + 0.45 + ], + "angle": 0, + "content": "role, we are further motivated by the intuition that different reward components may benefit from being emphasized at different stages of training. This leads us to explore dynamically adjusting reward scales in accordance with training progress. Specifically, we hypothesize that in early training, the model should prioritize learning the correct output format, which entails an easier objective, before gradually shifting focus to the more challenging goal of tool use correctness. To test this hypothesis, we design two dynamic reward scaling strategies:" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.453, + 0.49, + 0.598 + ], + "angle": 0, + "content": "- Two stage (Coarse) Setting: We divide training into two phases. In the first \\(s\\) training steps, we downscale the correctness reward to \\(\\frac{1}{3}\\) of its original scale while keeping the format reward at its original scale. After step \\(s\\), we restore the correctness reward to its original scale and simultaneously reduce the format reward to range \\([0, 0.5]\\) (\\(\\frac{1}{2}\\) of its original scale). Formally the reward scales are:" + }, + { + "type": "equation", + "bbox": [ + 0.158, + 0.606, + 0.457, + 0.648 + ], + "angle": 0, + "content": "\\[\n\\operatorname {S c a l e} _ {\\text {f o r m a t}} = \\left\\{ \\begin{array}{l l} [ 0, 1 ] & \\text {i f S _ {\\text {c u r r e n t}} < s} \\\\ [ 0, 0. 5 ] & \\text {o t h e r w i s e} \\end{array} , \\right.\n\\]" + }, + { + "type": "equation", + "bbox": [ + 0.163, + 0.659, + 0.454, + 0.701 + ], + "angle": 0, + "content": "\\[\n\\operatorname {S c a l e} _ {\\text {c o r r e c t}} = \\left\\{ \\begin{array}{l l} [ - 1, 1 ] & \\text {i f S _ {\\text {c u r r e n t}} < s} \\\\ [ - 3, 3 ] & \\text {o t h e r w i s e} \\end{array} \\right.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.706, + 0.49, + 0.817 + ], + "angle": 0, + "content": "where \\( S_{\\mathrm{current}} \\) denotes the current training step. In our experiments, we empirically set the switching point to \\( s = 30 \\) steps, as we observed that the format reward typically experiences a significant increase within the first 30 steps. Therefore, it is more beneficial for later steps to shift focus toward optimizing correctness." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.825, + 0.49, + 0.922 + ], + "angle": 0, + "content": "- Dynamic (Finegrained) Setting: We apply continuous interpolation between the two reward scales throughout training. Initially, both the format and correctness reward scales are set equally. 
Over time, the format reward scale linearly decays to its original value, while the correctness"
  },
  {
    "type": "image",
    "bbox": [0.517, 0.275, 0.691, 0.368],
    "angle": 0,
    "content": null
  },
  {
    "type": "image_caption",
    "bbox": [0.545, 0.373, 0.664, 0.386],
    "angle": 0,
    "content": "(a) Format Reward"
  },
  {
    "type": "image",
    "bbox": [0.702, 0.275, 0.876, 0.367],
    "angle": 0,
    "content": null
  },
  {
    "type": "image_caption",
    "bbox": [0.716, 0.373, 0.863, 0.386],
    "angle": 0,
    "content": "(b) Correctness Reward"
  },
  {
    "type": "image_caption",
    "bbox": [0.508, 0.402, 0.883, 0.445],
    "angle": 0,
    "content": "Figure 10: Format (left) and correctness (right) reward trends across training steps for Qwen2.5-3B-Instruct with different reward scale dynamics."
  },
  {
    "type": "text",
    "bbox": [0.525, 0.47, 0.884, 0.534],
    "angle": 0,
    "content": "reward scale gradually increases to its original value, allowing the training to shift focus from format adherence to task correctness accordingly. Formally, the dynamic scaling is then defined as:"
  },
  {
    "type": "equation",
    "bbox": [0.591, 0.547, 0.816, 0.565],
    "angle": 0,
    "content": "\\[\n\\operatorname{Scale}_{\\text{format}} = [-2 + p, 2 - p],\n\\]"
  },
  {
    "type": "equation",
    "bbox": [0.593, 0.576, 0.815, 0.594],
    "angle": 0,
    "content": "\\[\n\\operatorname{Scale}_{\\text{correct}} = [-2 - p, 2 + p]\n\\]"
  },
  {
    "type": "text",
    "bbox": [0.524, 0.6, 0.885, 0.664],
    "angle": 0,
    "content": "where \\(p \\in [0,1]\\) similarly represents the normalized training progress. This design ensures a smooth shift of learning focus from format fidelity to correctness."
  },
  {
    "type": "text",
    "bbox": [0.508, 0.669, 0.884, 0.862],
    "angle": 0,
    "content": "We present the reward dynamics of the original and the two dynamic scaling strategies in Figure 10. As shown in Table 6, the Two stage (Coarse) reward setting unexpectedly leads to a drop in performance, whereas the Dynamic (Finegrained) scaling improves the model's benchmark performance. These findings suggest that abrupt shifts in reward scale may negatively impact the training dynamics. In contrast, a smooth, gradual transition from simpler objectives to more nuanced ones appears to better support the model's learning trajectory and generalization during GRPO training."
  }
],
[
  {
    "type": "image",
    "bbox": [0.125, 0.089, 0.151, 0.107],
    "angle": 0,
    "content": null
  },
  {
    "type": "text",
    "bbox": [0.153, 0.09, 0.468, 0.185],
    "angle": 0,
    "content": "Takeaway 2: Gradually adjusting reward scales during training, rather than changing them abruptly, better supports model learning and generalization, highlighting the benefits of a smoother transition from simpler objectives to more complex ones."
  },
  {
    "type": "title",
    "bbox": [0.114, 0.218, 0.393, 0.234],
    "angle": 0,
    "content": "5.3 Effect of Reward Granularity"
  },
  {
    "type": "text",
    "bbox": [0.113, 0.238, 0.487, 0.414],
    "angle": 0,
    "content": "We now perform a detailed analysis of the effect of reward granularity, focusing specifically on the correctness reward. Tool calling, by nature, poses challenges for reward assignment, as it involves multiple facets beyond a single definitive answer (e.g., in contrast to math reasoning tasks). 
Our original reward design decomposes correctness into matching the tool name, parameter names, and parameter values, offering a finegrained, \"process-oriented\" signal that reflects partial correctness in tool usage."
  },
  {
    "type": "text",
    "bbox": [0.114, 0.416, 0.49, 0.464],
    "angle": 0,
    "content": "To assess the impact of this granularity, we evaluate three alternative reward formulations with progressively coarser levels of aggregation:"
  },
  {
    "type": "text",
    "bbox": [0.116, 0.467, 0.49, 0.515],
    "angle": 0,
    "content": "- Finegrained: We apply strict exact-match constraints to both tool name and parameter name matching. Specifically, we define:"
  },
  {
    "type": "equation",
    "bbox": [0.211, 0.529, 0.406, 0.543],
    "angle": 0,
    "content": "\\[\nr_{\\text{name}} = \\mathbb{1}\\left[ N_{G} = N_{P} \\right] \\in \\{0, 1\\}\n\\]"
  },
  {
    "type": "equation",
    "bbox": [0.151, 0.548, 0.466, 0.577],
    "angle": 0,
    "content": "\\[\nr_{\\text{param}} = \\sum_{G_{j} \\in G} \\mathbb{1}\\left[ \\operatorname{keys}\\left(P_{G}\\right) = \\operatorname{keys}\\left(P_{P}\\right) \\right] \\in [0, |G|]\n\\]"
  },
  {
    "type": "text",
    "bbox": [0.116, 0.588, 0.489, 0.653],
    "angle": 0,
    "content": "- Intermediate: We combine the parameter name and value rewards into a single term that enforces an exact match on the entire parameter dictionary. Formally:"
  },
  {
    "type": "equation",
    "bbox": [0.165, 0.664, 0.452, 0.693],
    "angle": 0,
    "content": "\\[\nr_{\\text{param}} + r_{\\text{value}} = \\sum_{G_{j} \\in G} \\mathbb{1}\\left[ P_{G} = P_{P} \\right] \\in [0, |G|]\n\\]"
  },
  {
    "type": "text",
    "bbox": [0.116, 0.704, 0.489, 0.784],
    "angle": 0,
    "content": "- Coarse: At the coarsest level, we fully entangle tool name, parameter names, and parameter values, treating the entire tool set as a unit. Reward is given only if the generated tool set exactly matches the ground truth:"
  },
  {
    "type": "equation",
    "bbox": [0.17, 0.798, 0.446, 0.813],
    "angle": 0,
    "content": "\\[\nr_{\\text{name}} + r_{\\text{param}} + r_{\\text{value}} = \\mathbb{1}[G = P] \\in \\{0, 1\\}\n\\]"
  },
  {
    "type": "text",
    "bbox": [0.113, 0.825, 0.49, 0.92],
    "angle": 0,
    "content": "All other aspects of reward computation are kept identical to those described in Section 3.3. Starting from our original design, which is the most finegrained, we progressively entangle reward components to derive increasingly coarse-grained alternatives."
  },
  {
    "type": "image",
    "bbox": [0.513, 0.083, 0.88, 0.179],
    "angle": 0,
    "content": null
  },
  {
    "type": "image_caption",
    "bbox": [0.509, 0.19, 0.882, 0.234],
    "angle": 0,
    "content": "Figure 11: Correctness reward trends across training steps for Qwen2.5-3B-Instruct with different reward granularity."
  },
  {
    "type": "text",
    "bbox": [0.508, 0.258, 0.884, 0.37],
    "angle": 0,
    "content": "The reward dynamics across training steps, shown in Figure 11, demonstrate that as reward granularity becomes coarser, it becomes harder for the model to achieve higher reward values during RL training. This suggests that overly strict and entangled rewards may lead to sparse learning signals, potentially hindering effective credit assignment."
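},
  {
    "type": "text",
    "content": "To make the contrast concrete, the sketch below (a simplified illustration under our own assumptions about the call representation, not the paper's exact scoring code) scores a predicted tool-call set against the ground truth under each formulation:"
  },
  {
    "type": "text",
    "content": "def correctness(gold: list, pred: list, granularity: str = 'finegrained') -> float:
    # Hedged sketch of Section 5.3's granularity levels; each call is assumed to be
    # a dict of the form {'name': str, 'parameters': dict}.
    if granularity == 'coarse':
        # r_name + r_param + r_value = 1[G = P]: all-or-nothing on the whole tool set
        return float(sorted(map(repr, gold)) == sorted(map(repr, pred)))
    # r_name = 1[N_G = N_P]: the predicted tool-name set must match exactly
    score = float({g['name'] for g in gold} == {p['name'] for p in pred})
    pred_params = {p['name']: p['parameters'] for p in pred}
    for g in gold:
        p = pred_params.get(g['name'], {})
        if granularity == 'intermediate':
            # r_param + r_value entangled: exact match on the entire parameter dict
            score += float(p == g['parameters'])
        else:
            # finegrained: separate credit for parameter-name sets and parameter values
            score += float(set(p) == set(g['parameters']))
            score += sum(p.get(k) == v for k, v in g['parameters'].items()) / max(len(g['parameters']), 1)
    return score"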
+ }, + { + "type": "text", + "bbox": [ + 0.508, + 0.371, + 0.884, + 0.483 + ], + "angle": 0, + "content": "Empirical results in Table 7 further support this insight: our original, most finegrained reward strategy performs well across models. In general, finer-grained reward decomposition leads to better training outcomes and higher final task performance, indicating its advantage in promoting more stable and effective policy learning." + }, + { + "type": "image", + "bbox": [ + 0.519, + 0.494, + 0.545, + 0.512 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.547, + 0.495, + 0.866, + 0.591 + ], + "angle": 0, + "content": "Takeaway 3: Finegrained reward decomposition provides richer learning signals, highlighting its role in enabling more effective training compared to coarse reward formulations, which can impede progress and degrade final performance." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.624, + 0.642, + 0.638 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.648, + 0.884, + 0.922 + ], + "angle": 0, + "content": "In this paper, we present a reward design tailored for GRPO training on tool use tasks. Empirically, our model trained from scratch using GRPO consistently outperforms both SFT-based and SFT-initialized RL baselines, as well as models trained with alternative RL algorithms, across a variety of held-out tool use benchmarks. Furthermore, we demonstrate that our model generalizes well to QA settings, exhibiting robust multi-turn interactions, emergent proactiveness, and metacognitive behaviors, all of which are key traits for efficient and adaptable tool use, lying at the core of foundational agent capabilities. Our in-depth analysis of reward types, scaling strategies, granularity, and temporal dynamics provides further insights into how reward shaping influences learning and behavior. We hope these findings serve as a roadmap for future work" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.12, + 0.082, + 0.88, + 0.221 + ], + "angle": 0, + "content": "
ModelOverall AccNon-Live AST AccNon-Live Exec AccLive AccMulti Turn AccRelevance DetectionIrrelevance Detection
Qwen2.5-1.5B-Instruct (Original)46.20%77.96%76.98%60.73%2.25%100.00%56.44%
Qwen2.5-1.5B-Instruct (Finegrained)40.71%78.00%75.55%48.91%2.00%100.00%24.84%
Qwen2.5-1.5B-Instruct (Intermediate)37.65%77.94%72.46%43.00%1.62%100.00%12.45%
Qwen2.5-1.5B-Instruct (Coarse)36.72%76.44%70.86%41.27%2.12%100.00%12.24%
Qwen2.5-3B-Instruct (Original)52.98%81.58%79.43%73.78%3.75%88.24%84.85%
Qwen2.5-3B-Instruct (Finegrained)52.06%81.65%79.64%69.21%5.50%83.33%78.14%
Qwen2.5-3B-Instruct (Intermediate)51.36%81.15%80.07%68.64%4.25%88.89%75.74%
Qwen2.5-3B-Instruct (Coarse)51.40%79.48%78.54%68.73%5.62%88.89%77.80%
Llama-3.2-3B-Instruct (Original)44.10%74.38%75.18%56.86%1.37%94.44%62.23%
Llama-3.2-3B-Instruct (Finegrained)39.82%64.71%70.68%52.20%0.25%100.00%56.68%
Llama-3.2-3B-Instruct (Intermediate)38.62%59.83%71.86%50.56%0.25%94.44%55.68%
Llama-3.2-3B-Instruct (Coarse)35.95%52.00%61.43%48.96%1.12%83.33%61.92%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.319, + 0.231, + 0.678, + 0.246 + ], + "angle": 0, + "content": "Table 7: BFCL V3 Benchmark Results (Granularity)" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.271, + 0.49, + 0.336 + ], + "angle": 0, + "content": "in applying reinforcement learning to tool use. Ultimately, we envision that reward is all tool learning needs, and that RL offers a powerful path toward generalizable and creative agent behavior." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.364, + 0.214, + 0.379 + ], + "angle": 0, + "content": "References" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.389, + 0.49, + 0.48 + ], + "angle": 0, + "content": "Emre Can Acikgoz, Jeremiah Greer, Akul Datta, Ze Yang, William Zeng, Oussama Elachqar, Emmanuel Koukoumidis, Dilek Hakkani-Tur, and Gokhan Tur. 2025. Can a single model master both multi-turn conversations and tool use? coalm: A unified conversational agentic language model. Preprint, arXiv:2502.08820." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.493, + 0.49, + 0.558 + ], + "angle": 0, + "content": "Jinheon Baek, Sujay Kumar Jauhar, Silviu Cucerzan, and Sung Ju Hwang. 2024. Researchagent: Iterative research idea generation over scientific literature with large language models. arXiv preprint arXiv:2404.07738." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.57, + 0.49, + 0.624 + ], + "angle": 0, + "content": "Baian Chen, Chang Shu, Ehsan Shareghi, Nigel Collier, Karthik Narasimhan, and Shunyu Yao. 2023a. Fireact: Toward language agent fine-tuning. arXiv preprint arXiv:2310.05915." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.635, + 0.49, + 0.688 + ], + "angle": 0, + "content": "Nuo Chen, Hongguang Li, Baoyuan Wang, and Jia Li. 2023b. From good to great: Improving math reasoning with tool-augmented interleaf prompting. arXiv preprint arXiv:2401.05384." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.7, + 0.49, + 0.764 + ], + "angle": 0, + "content": "Wenhu Chen, Xueguang Ma, Xinyi Wang, and William W Cohen. 2022. Program of thoughts prompting: Disentangling computation from reasoning for numerical reasoning tasks. arXiv preprint arXiv:2211.12588." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.777, + 0.49, + 0.883 + ], + "angle": 0, + "content": "Zehui Chen, Kuikun Liu, Qiuchen Wang, Wenwei Zhang, Jiangning Liu, Dahua Lin, Kai Chen, and Feng Zhao. 2024. Agent-FLAN: Designing data and methods of effective agent tuning for large language models. In *Findings of the Association for Computational Linguistics: ACL* 2024, pages 9354–9366, Bangkok, Thailand. Association for Computational Linguistics." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.894, + 0.49, + 0.921 + ], + "angle": 0, + "content": "Tianzhe Chu, Yuexiang Zhai, Jihan Yang, Shengbang Tong, Saining Xie, Dale Schuurmans, Quoc V" + }, + { + "type": "text", + "bbox": [ + 0.527, + 0.272, + 0.885, + 0.325 + ], + "angle": 0, + "content": "Le, Sergey Levine, and Yi Ma. 2025. Sft memorizes, rl generalizes: A comparative study of foundation model post-training. arXiv preprint arXiv:2501.17161." + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.336, + 0.884, + 0.378 + ], + "angle": 0, + "content": "Quy-Anh Dang and Chris Ngo. 2025. Reinforcement learning for reasoning in small llms: What works and what doesn't. arXiv preprint arXiv:2503.16219." 
+ }, + { + "type": "text", + "bbox": [ + 0.51, + 0.388, + 0.884, + 0.455 + ], + "angle": 0, + "content": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783." + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.466, + 0.884, + 0.531 + ], + "angle": 0, + "content": "Zhibin Gou, Zhihong Shao, Yeyun Gong, Yelong Shen, Yujiu Yang, Minlie Huang, Nan Duan, and Weizhu Chen. 2023. Tora: A tool-integrated reasoning agent for mathematical problem solving. arXiv preprint arXiv:2309.17452." + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.544, + 0.884, + 0.61 + ], + "angle": 0, + "content": "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948." + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.622, + 0.884, + 0.687 + ], + "angle": 0, + "content": "Jiangyong Huang, Silong Yong, Xiaojian Ma, Xiongkun Linghu, Puhao Li, Yan Wang, Qing Li, Song-Chun Zhu, Baoxiong Jia, and Siyuan Huang. 2023. An embodied generalist agent in 3d world. arXiv preprint arXiv:2311.12871." + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.7, + 0.884, + 0.779 + ], + "angle": 0, + "content": "Zhen Huang, Haoyang Zou, Xuefeng Li, Yixiu Liu, Yuxiang Zheng, Ethan Chern, Shijie Xia, Yiwei Qin, Weizhe Yuan, and Pengfei Liu. 2024. O1 replication journey-part 2: Surpassing o1-preview through simple distillation, big progress or bitter lesson? arXiv preprint arXiv:2411.16489." + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.79, + 0.884, + 0.843 + ], + "angle": 0, + "content": "Yoshitaka Inoue, Tianci Song, and Tianfan Fu. 2024. Drugagent: Explainable drug repurposing agent with large language model-based reasoning. arXiv preprint arXiv:2408.13378." + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.855, + 0.884, + 0.92 + ], + "angle": 0, + "content": "Bowen Jin, Hansi Zeng, Zhenrui Yue, Dong Wang, Hamed Zamani, and Jiawei Han. 2025. Search: Training lms to reason and leverage search engines with reinforcement learning. arXiv preprint arXiv:2503.09516." + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.086, + 0.487, + 0.139 + ], + "angle": 0, + "content": "Minki Kang, Jongwon Jeong, and Jaewoong Cho. 2025. T1: Tool-integrated self-verification for test-time compute scaling in small language models. arXiv preprint arXiv:2504.04718." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.149, + 0.486, + 0.201 + ], + "angle": 0, + "content": "Timo Kaufmann, Paul Weng, Viktor Bengs, and Eyke Hüllermeier. 2023. A survey of reinforcement learning from human feedback. arXiv preprint arXiv:2312.14925." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.212, + 0.487, + 0.291 + ], + "angle": 0, + "content": "Komal Kumar, Tajamul Ashraf, Omkar Thawakar, Rao Muhammad Anwer, Hisham Cholakkal, Mubarak Shah, Ming-Hsuan Yang, Phillip HS Torr, Salman Khan, and Fahad Shahbaz Khan. 2025. Lm post-training: A deep dive into reasoning large language models. arXiv preprint arXiv:2502.21321." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.3, + 0.487, + 0.38 + ], + "angle": 0, + "content": "Minghao Li, Yingxiu Zhao, Bowen Yu, Feifan Song, Hangyu Li, Haiyang Yu, Zhoujun Li, Fei Huang, and Yongbin Li. 2023. Api-bank: A comprehensive benchmark for tool-augmented llms. 
In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 3102-3116." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.389, + 0.487, + 0.428 + ], + "angle": 0, + "content": "Xuefeng Li, Haoyang Zou, and Pengfei Liu. 2025a. Limr: Less is more for rl scaling. arXiv preprint arXiv:2502.11886." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.439, + 0.487, + 0.478 + ], + "angle": 0, + "content": "Xuefeng Li, Haoyang Zou, and Pengfei Liu. 2025b. Torl: Scaling tool-integrated rl. arXiv preprint arXiv:2503.23383." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.488, + 0.486, + 0.542 + ], + "angle": 0, + "content": "Minpeng Liao, Wei Luo, Chengxi Li, Jing Wu, and Kai Fan. 2024. Mario: Math reasoning with code interpreter output-a reproducible pipeline. arXiv preprint arXiv:2401.08190." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.552, + 0.487, + 0.629 + ], + "angle": 0, + "content": "Qiqiang Lin, Muning Wen, Qiuying Peng, Guanyu Nie, Junwei Liao, Jun Wang, Xiaoyun Mo, Jiamu Zhou, Cheng Cheng, Yin Zhao, et al. 2024. Hammer: Robust function-calling for on-device language models via function masking. arXiv preprint arXiv:2410.04587." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.64, + 0.487, + 0.719 + ], + "angle": 0, + "content": "Chen Ling, Xujiang Zhao, Jiaying Lu, Chengyuan Deng, Can Zheng, Junxiang Wang, Tanmoy Chowdhury, Yun Li, Hejie Cui, Xuchao Zhang, et al. 2023. Domain specialization as the key to make large language models disruptive: A comprehensive survey. arXiv preprint arXiv:2305.18703." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.729, + 0.487, + 0.795 + ], + "angle": 0, + "content": "Weiwen Liu, Xu Huang, Xingshan Zeng, Xinlong Hao, Shuai Yu, Dexun Li, Shuai Wang, Weinan Gan, Zhengying Liu, Yuanqing Yu, et al. 2024. Toolace: Winning the points of llm function calling. arXiv preprint arXiv:2409.00920." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.805, + 0.487, + 0.857 + ], + "angle": 0, + "content": "Yu Meng, Mengzhou Xia, and Danqi Chen. 2024. Simpo: Simple preference optimization with a reference-free reward. Advances in Neural Information Processing Systems, 37:124198-124235." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.867, + 0.487, + 0.919 + ], + "angle": 0, + "content": "Shishir G Patil, Tianjun Zhang, Xin Wang, and Joseph E Gonzalez. 2023. Gorilla: Large language model connected with massive apis. arXiv preprint arXiv:2305.15334." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.487, + 0.919 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.086, + 0.882, + 0.139 + ], + "angle": 0, + "content": "Shishir G Patil, Tianjun Zhang, Xin Wang, and Joseph E Gonzalez. 2024. Gorilla: Large language model connected with massive apis. Advances in Neural Information Processing Systems, 37:126544-126565." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.151, + 0.882, + 0.204 + ], + "angle": 0, + "content": "Ofir Press, Muru Zhang, Sewon Min, Ludwig Schmidt, Noah A Smith, and Mike Lewis. 2022. Measuring and narrowing the compositionality gap in language models. arXiv preprint arXiv:2210.03350." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.216, + 0.882, + 0.281 + ], + "angle": 0, + "content": "Cheng Qian, Emre Can Acikgoz, Hongru Wang, Xiusi Chen, Avirup Sil, Dilek Hakkani-Tur, Gokhan Tur, and Heng Ji. 2025. Smart: Self-aware agent for tool overuse mitigation. arXiv preprint arXiv:2502.11435." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.295, + 0.882, + 0.372 + ], + "angle": 0, + "content": "Cheng Qian, Chi Han, Yi Fung, Yujia Qin, Zhiyuan Liu, and Heng Ji. 2023. Creator: Tool creation for disentangling abstract and concrete reasoning of large language models. In *Findings of the Association for Computational Linguistics: EMNLP* 2023, pages 6922-6939." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.386, + 0.882, + 0.452 + ], + "angle": 0, + "content": "Cheng Qian, Peixuan Han, Qinyu Luo, Bingxiang He, Xiusi Chen, Yuji Zhang, Hongyi Du, Jiarui Yao, Xiaocheng Yang, Denghui Zhang, et al. 2024a. Escapebench: Pushing language models to think outside the box. arXiv preprint arXiv:2412.13549." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.464, + 0.882, + 0.568 + ], + "angle": 0, + "content": "Cheng Qian, Chenyan Xiong, Zhenghao Liu, and Zhiyuan Liu. 2024b. Toolink: Linking toolkit creation and using through chain-of-solving on open-source model. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 831-854." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.581, + 0.882, + 0.646 + ], + "angle": 0, + "content": "Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, et al. 2024a. O1 replication journey: A strategic progress report-part 1. arXiv preprint arXiv:2410.18982." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.659, + 0.882, + 0.724 + ], + "angle": 0, + "content": "Yujia Qin, Shengding Hu, Yankai Lin, Weize Chen, Ning Ding, Ganqu Cui, Zheni Zeng, Yufei Huang, Chaojun Xiao, Chi Han, et al. 2023. Tool learning with foundation models. arXiv preprint arXiv.2304.08354, 10." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.737, + 0.882, + 0.803 + ], + "angle": 0, + "content": "Yujia Qin, Shengding Hu, Yankai Lin, Weize Chen, Ning Ding, Ganqu Cui, Zheni Zeng, Xuanhe Zhou, Yufei Huang, Chaojun Xiao, et al. 2024b. Tool learning with foundation models. ACM Computing Surveys, 57(4):1-40." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.816, + 0.882, + 0.921 + ], + "angle": 0, + "content": "Yujia Qin, Shihao Liang, Yining Ye, Kunlun Zhu, Lan Yan, Yaxi Lu, Yankai Lin, Xin Cong, Xiangru Tang, Bill Qian, Sihan Zhao, Lauren Hong, Runchu Tian, Ruobing Xie, Jie Zhou, Mark Gerstein, Dahai Li, Zhiyuan Liu, and Maosong Sun. 2024c. Toollm: Facilitating large language models to master 16000+ real-world apis. In The Twelfth International Conference on Learning Representations." + }, + { + "type": "list", + "bbox": [ + 0.511, + 0.086, + 0.882, + 0.921 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.086, + 0.49, + 0.165 + ], + "angle": 0, + "content": "Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. 2023. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.178, + 0.49, + 0.244 + ], + "angle": 0, + "content": "Yusuf Roohani, Andrew Lee, Qian Huang, Jian Vora, Zachary Steinhart, Kexin Huang, Alexander Marson, Percy Liang, and Jure Leskovec. 2024. Biodiscoveryagent: An ai agent for designing genetic perturbation experiments. arXiv preprint arXiv:2405.17631." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.258, + 0.49, + 0.336 + ], + "angle": 0, + "content": "Timo Schick, Jane Dwivedi-Yu, Roberto Dessì, Roberta Raileanu, Maria Lomeli, Eric Hambro, Luke Zettle-moyer, Nicola Cancedda, and Thomas Scialom. 2023. Toolformer: Language models can teach themselves to use tools. Advances in Neural Information Processing Systems, 36:68539-68551." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.35, + 0.488, + 0.402 + ], + "angle": 0, + "content": "John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. 2017. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.416, + 0.488, + 0.483 + ], + "angle": 0, + "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. 2024. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.496, + 0.488, + 0.562 + ], + "angle": 0, + "content": "Haozhan Shen, Peng Liu, Jingcheng Li, Chunxin Fang, Yibo Ma, Jiajia Liao, Qiaoli Shen, Zilun Zhang, Kangjia Zhao, Qianqian Zhang, et al. 2025. Vlmr1: A stable and generalizable r1-style large vision-language model. arXiv preprint arXiv:2504.07615." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.575, + 0.488, + 0.64 + ], + "angle": 0, + "content": "Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. 2024. Hybridflow: A flexible and efficient rlhf framework. arXiv preprint arXiv:2409.19256." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.655, + 0.488, + 0.721 + ], + "angle": 0, + "content": "Huatong Song, Jinhao Jiang, Yingqian Min, Jie Chen, Zhipeng Chen, Wayne Xin Zhao, Lei Fang, and Ji-Rong Wen. 2025. R1-searcher: Incentivizing the search capability in llms via reinforcement learning. arXiv preprint arXiv:2503.05592." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.735, + 0.488, + 0.801 + ], + "angle": 0, + "content": "Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. 2025. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.814, + 0.488, + 0.84 + ], + "angle": 0, + "content": "Qwen Team. 2024. Qwen2.5: A party of foundation models." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.854, + 0.488, + 0.92 + ], + "angle": 0, + "content": "Tu Vu, Mohit Iyyer, Xuezhi Wang, Noah Constant, Jerry Wei, Jason Wei, Chris Tar, Yun-Hsuan Sung, Denny Zhou, Quoc Le, et al. 2023. Freshllms: Refreshing large language models with search engine augmentation. arXiv preprint arXiv:2310.03214." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.49, + 0.92 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.086, + 0.883, + 0.165 + ], + "angle": 0, + "content": "Yidong Wang, Qi Guo, Wenjin Yao, Hongbo Zhang, Xin Zhang, Zhen Wu, Meishan Zhang, Xinyu Dai, Qingsong Wen, Wei Ye, et al. 2024. Autosurvey: Large language models can automatically write surveys. Advances in Neural Information Processing Systems, 37:115119-115145." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.173, + 0.883, + 0.239 + ], + "angle": 0, + "content": "Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. 2025. Logic-rl: Unleashing llm reasoning with rule-based reinforcement learning. arXiv preprint arXiv:2502.14768." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.248, + 0.883, + 0.313 + ], + "angle": 0, + "content": "Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik R Narasimhan, and Yuan Cao. 2023. React: Synergizing reasoning and acting in language models. In The Eleventh International Conference on Learning Representations." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.322, + 0.883, + 0.387 + ], + "angle": 0, + "content": "Yining Ye, Xin Cong, Shizuo Tian, Yujia Qin, Chong Liu, Yankai Lin, Zhiyuan Liu, and Maosong Sun. 2023. Rational decision-making agent with internalized utility judgment. arXiv preprint arXiv:2308.12519." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.396, + 0.883, + 0.462 + ], + "angle": 0, + "content": "Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. 2025. Dapo: An opensource llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.47, + 0.883, + 0.536 + ], + "angle": 0, + "content": "Yuanqing Yu, Zhefan Wang, Weizhi Ma, Zhicheng Guo, Jingtao Zhan, Shuai Wang, Chuhan Wu, Zhiqiang Guo, and Min Zhang. 2024. Steptool: A step-grained reinforcement learning framework for tool learning in llms. arXiv preprint arXiv:2410.07745." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.544, + 0.883, + 0.622 + ], + "angle": 0, + "content": "Yufeng Yuan, Qiying Yu, Xiaochen Zuo, Ruofei Zhu, Wenyuan Xu, Jiaze Chen, Chengyi Wang, TianTian Fan, Zhengyin Du, Xiangpeng Wei, et al. 2025. Vapo: Efficient and reliable reinforcement learning for advanced reasoning tasks. arXiv preprint arXiv:2504.05118." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.632, + 0.883, + 0.711 + ], + "angle": 0, + "content": "Aohan Zeng, Mingdao Liu, Rui Lu, Bowen Wang, Xiao Liu, Yuxiao Dong, and Jie Tang. 2024. AgentTuning: Enabling generalized agent abilities for LLMs. In Findings of the Association for Computational Linguistics: ACL 2024, pages 3053-3077, Bangkok, Thailand. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.719, + 0.883, + 0.772 + ], + "angle": 0, + "content": "Yuanzhao Zhai, Tingkai Yang, Kele Xu, Feng Dawei, Cheng Yang, Bo Ding, and Huaimin Wang. 2024. Enhancing decision-making for llm agents via step-level q-value models. arXiv preprint arXiv:2409.09345." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.781, + 0.883, + 0.846 + ], + "angle": 0, + "content": "Hongxin Zhang, Weihua Du, Jiaming Shan, Qinhong Zhou, Yilun Du, Joshua B Tenenbaum, Tianmin Shu, and Chuang Gan. 2023. Building cooperative embodied agents modularly with large language models. arXiv preprint arXiv:2307.02485." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.854, + 0.883, + 0.92 + ], + "angle": 0, + "content": "Jianguo Zhang, Tian Lan, Ming Zhu, Zuxin Liu, Thai Hoang, Shirley Kokane, Weiran Yao, Juntao Tan, Akshara Prabhakar, Haolin Chen, et al. 2024. xlam: A family of large action models to empower ai agent systems. arXiv preprint arXiv:2409.03215." 
+ }, + { + "type": "list", + "bbox": [ + 0.512, + 0.086, + 0.883, + 0.92 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "text", + "bbox": [ + 0.117, + 0.086, + 0.49, + 0.153 + ], + "angle": 0, + "content": "Yuxiang Zheng, Dayuan Fu, Xiangkun Hu, Xiaojie Cai, Lyumanshan Ye, Pengrui Lu, and Pengfei Liu. 2025. Deepresearch: Scaling deep research via reinforcement learning in real-world environments. arXiv preprint arXiv:2504.03160." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.115, + 0.085, + 0.204, + 0.101 + ], + "angle": 0, + "content": "Appendix" + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.11, + 0.331, + 0.127 + ], + "angle": 0, + "content": "A User Prompt Details" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.135, + 0.49, + 0.232 + ], + "angle": 0, + "content": "The system instruction is shown in Figure 4. The user prompt is used to store the trajectory history, including intermediate thoughts, tool calls, environment observations, and any additional user commands. The complete user instruction is presented in Figure 12." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.243, + 0.321, + 0.26 + ], + "angle": 0, + "content": "B Experiment Details" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.268, + 0.488, + 0.414 + ], + "angle": 0, + "content": "Training Data Details. We empirically use 4K data points for training, as each dataset consists of samples drawn from the same distribution. Adding more data of similar nature does not increase task diversity. Moreover, we observe that increasing the dataset size beyond 4K does not yield noticeable improvements in the training convergence or final performance, suggesting diminishing returns from additional data under this setting." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.422, + 0.489, + 0.52 + ], + "angle": 0, + "content": "GRPO Setting Details. We use JSON format to represent all tool calls in the dataset, as it is easy to parse and is the most general and structured way of expressing a tool call. For the GRPO training, we use 2 A100 (80G) GPUs per run with the following hyper-parameters:" + }, + { + "type": "table", + "bbox": [ + 0.147, + 0.53, + 0.457, + 0.81 + ], + "angle": 0, + "content": "<table><tr>
CategoryHyperparameter
Data Configuration
Train Batch Size512
Validation Batch Size128
Max Prompt Length2048
Max Response Length1024
Optimization
Learning Rate1e-6
PPO Mini Batch Size128
KL Loss UsedFalse
Rollout Configuration
Rollout Namevllm
GPU Memory Utilization0.6
Number of Rollouts4
Training & Logging
Save Frequency (Steps)15
Test Frequency (Steps)5
Total Epochs15
" + }, + { + "type": "table_caption", + "bbox": [ + 0.155, + 0.818, + 0.446, + 0.833 + ], + "angle": 0, + "content": "Table 8: Configuration for GRPO training." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.857, + 0.49, + 0.922 + ], + "angle": 0, + "content": "Baselines. The 400 selected data points used for SFT share the same distribution as the 4k data points used for RL training, but differ in content. For SFT, each data point includes a field," + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.882, + 0.149 + ], + "angle": 0, + "content": "with thought content distilled from Deepseek-R1 trajectories. In contrast, GRPO does not require ground truth thought, as only the tool calls are used to compute rewards in the GRPO setting." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.15, + 0.884, + 0.279 + ], + "angle": 0, + "content": "We use 400 data points for SFT based on empirical observations that this amount is sufficient to help the raw model learn to follow our tool call format. This provides a stronger initialization and reduces the burden of learning the format from scratch during RL training. However, we also find that relying solely on SFT can lead to overfitting, which may ultimately degrade performance." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.288, + 0.884, + 0.353 + ], + "angle": 0, + "content": "PPO Setting Details. We apply approximately the same parameter settings as GRPO for the PPO training. Similarly, we use 2 A100 (80G) GPUs per run with the following hyper-parameters:" + }, + { + "type": "table", + "bbox": [ + 0.543, + 0.363, + 0.851, + 0.666 + ], + "angle": 0, + "content": "
CategoryHyperparameter
Data Configuration
Train Batch Size512
Validation Batch Size128
Max Prompt Length1024
Max Response Length512
Optimization
Actor Learning Rate1e-6
Critic Learning Rate1e-5
PPO Mini Batch Size128
PPO Micro Batch Size8
KL Coefficient0.001
Rollout Configuration
Rollout Namevllm
GPU Memory Utilization0.3
Training & Logging
Critic Warmup Steps0
Save Frequency (Steps)15
Test Frequency (Steps)5
Total Epochs15
" + }, + { + "type": "table_caption", + "bbox": [ + 0.557, + 0.675, + 0.834, + 0.69 + ], + "angle": 0, + "content": "Table 9: Configuration for PPO training." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.718, + 0.71, + 0.734 + ], + "angle": 0, + "content": "C Additional Results" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.745, + 0.884, + 0.841 + ], + "angle": 0, + "content": "We present additional results on three benchmarks, applying GRPO and PPO methods to models initialized with SFT on 4K data points. This setting serves as a \"theoretical\" upper bound, since the same 4K data is first used for SFT and subsequently reused for RL training." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.842, + 0.884, + 0.922 + ], + "angle": 0, + "content": "The results are shown in Table 10, Table 11, and Table 12 for BFCL, API-Bank, and Bamboogle, respectively. We compare RL training initialized with models fine-tuned on either 400 or 4K SFT data points." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.174, + 0.082, + 0.823, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.326, + 0.272, + 0.673, + 0.288 + ], + "angle": 0, + "content": "Figure 12: The user prompt used for TIR's rollout." + }, + { + "type": "table", + "bbox": [ + 0.12, + 0.301, + 0.88, + 0.513 + ], + "angle": 0, + "content": "
ModelOverall AccNon-Live AST AccNon-Live Exec AccLive AccMulti Turn AccRelevance DetectionIrrelevance Detection
Qwen2.5-1.5B-Instruct (Raw)19.41%16.00%13.18%35.58%0.00%44.44%82.49%
Qwen2.5-1.5B-Instruct (SFT400+PPO)42.95%77.65%69.75%55.73%1.88%100.00%48.40%
Qwen2.5-1.5B-Instruct (SFT400+GRPO)40.93%70.54%60.79%56.33%1.00%94.44%58.63%
Qwen2.5-1.5B-Instruct (SFT4k+PPO)40.24%66.42%62.02%54.58%2.50%94.12%55.09%
Qwen2.5-1.5B-Instruct (SFT4k+GRPO)42.63%66.60%64.77%60.15%1.38%88.89%67.98%
Qwen2.5-3B-Instruct (Raw)33.04%42.52%40.80%53.96%1.00%64.71%56.01%
Qwen2.5-3B-Instruct (SFT400+PPO)45.80%78.29%71.09%58.76%5.12%94.12%54.70%
Qwen2.5-3B-Instruct (SFT400+GRPO)46.42%76.21%68.93%64.15%1.75%88.89%71.76%
Qwen2.5-3B-Instruct (SFT4k+PPO)48.22%77.75%73.18%64.27%5.25%94.12%66.41%
Qwen2.5-3B-Instruct (SFT4k+GRPO)47.82%75.12%69.52%68.19%2.38%77.78%76.16%
Qwen2.5-7B-Instruct (Raw)41.97%66.02%70.11%53.51%4.25%76.47%62.66%
Qwen2.5-7B-Instruct (SFT400+PPO)42.02%83.90%72.62%51.84%0.25%100%29.66%
Qwen2.5-7B-Instruct (SFT400+GRPO)39.25%80.69%74.34%46.51%0.25%100%14.19%
Qwen2.5-7B-Instruct (SFT4k+PPO)33.80%42.67%49.50%51.80%2.38%77.78%55.79%
Qwen2.5-7B-Instruct (SFT4k+GRPO)35.18%43.58%50.39%55.49%0.87%77.78%67.12%
Llama-3.2-3B-Instruct (Raw)22.09%17.44%14.57%43.85%0.00%77.78%66.07%
Llama-3.2-3B-Instruct (SFT400+PPO)41.62%68.10%69.88%52.98%3.00%94.12%56.29%
Llama-3.2-3B-Instruct (SFT400+GRPO)42.54%65.15%68.98%59.40%0.88%72.22%65.80%
Llama-3.2-3B-Instruct (SFT4k+PPO)45.41%73.71%68.46%62.27%2.50%82.35%68.75%
Llama-3.2-3B-Instruct (SFT4k+GRPO)45.50%70.69%67.70%64.73%1.00%77.78%78.85%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.294, + 0.523, + 0.704, + 0.538 + ], + "angle": 0, + "content": "Table 10: BFCL V3 Benchmark Results (Additional Result)" + }, + { + "type": "table", + "bbox": [ + 0.12, + 0.551, + 0.508, + 0.768 + ], + "angle": 0, + "content": "
ModelOverall AccLevel 1Level 2Level 3
Qwen2.5-1.5B-Instruct (Raw)30.65%28.32%35.82%35.11%
Qwen2.5-1.5B-Instruct (SFT400+PPO)57.12%60.9%50.75%48.85%
Qwen2.5-1.5B-Instruct (SFT400+GRPO)61.31%64.16%58.21%54.20%
Qwen2.5-1.5B-Instruct (SFT4k+PPO)61.31%64.91%56.72%52.67%
Qwen2.5-1.5B-Instruct (SFT4k+GRPO)59.46%65.16%53.73%45.04%
Qwen2.5-3B-Instruct (Raw)51.59%59.65%32.84%36.64%
Qwen2.5-3B-Instruct (SFT400+PPO)65.16%67.92%55.22%61.83%
Qwen2.5-3B-Instruct (SFT400+GRPO)62.48%68.67%58.21%45.80%
Qwen2.5-3B-Instruct (SFT4k+PPO)60.13%64.41%44.78%54.96%
Qwen2.5-3B-Instruct (SFT4k+GRPO)60.80%64.41%56.72%51.91%
Qwen2.5-7B-Instruct (Raw)62.48%70.68%49.25%44.27%
Qwen2.5-7B-Instruct (SFT400+PPO)63.15%72.43%58.21%37.4%
Qwen2.5-7B-Instruct (SFT400+GRPO)54.10%61.40%52.24%32.82%
Qwen2.5-7B-Instruct (SFT4k+PPO)59.30%61.40%40.30%61.60%
Qwen2.5-7B-Instruct (SFT4k+GRPO)52.60%56.39%34.33%50.38%
Llama-3.2-3B-Instruct (Raw)40.54%44.86%29.85%32.82%
Llama-3.2-3B-Instruct (SFT400+PPO)57.79%63.16%47.76%46.56%
Llama-3.2-3B-Instruct (SFT400+GRPO)56.78%63.60%41.79%43.51%
Llama-3.2-3B-Instruct (SFT4k+PPO)54.10%60.65%40.30%41.22%
Llama-3.2-3B-Instruct (SFT4k+GRPO)50.92%59.15%34.33%34.35%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.131, + 0.776, + 0.492, + 0.79 + ], + "angle": 0, + "content": "Table 11: API-Bank Test Results (Additional Result)" + }, + { + "type": "table", + "bbox": [ + 0.536, + 0.551, + 0.882, + 0.768 + ], + "angle": 0, + "content": "
ModelAccuracyAvg Num Tool Call
Qwen2.5-1.5B-Instruct (Raw)20.8%0.61
Qwen2.5-1.5B-Instruct (SFT400+PPO)36.8%1.06
Qwen2.5-1.5B-Instruct (SFT400+GRPO)38.4%0.96
Qwen2.5-1.5B-Instruct (SFT4k+PPO)36.8%1.06
Qwen2.5-1.5B-Instruct (SFT4k+GRPO)34.4%1.02
Qwen2.5-3B-Instruct (Raw)52.0%1.77
Qwen2.5-3B-Instruct (SFT400+PPO)43.2%1.04
Qwen2.5-3B-Instruct (SFT400+GRPO)56.8%0.99
Qwen2.5-3B-Instruct (SFT4k+PPO)46.4%1.01
Qwen2.5-3B-Instruct (SFT4k+GRPO)47.2%0.98
Qwen2.5-7B-Instruct (Raw)69.6%1.42
Qwen2.5-7B-Instruct (SFT400+PPO)45.6%3.54
Qwen2.5-7B-Instruct (SFT400+GRPO)29.6%3.70
Qwen2.5-7B-Instruct (SFT4k+PPO)40.0%1.25
Qwen2.5-7B-Instruct (SFT4k+GRPO)32.0%1.25
Llama-3.2-3B-Instruct (Raw)34.4%1.25
Llama-3.2-3B-Instruct (SFT400+PPO)39.2%1.33
Llama-3.2-3B-Instruct (SFT400+GRPO)45.6%1.00
Llama-3.2-3B-Instruct (SFT4k+PPO)49.6%1.02
Llama-3.2-3B-Instruct (SFT4k+GRPO)42.4%1.03
" + }, + { + "type": "table_caption", + "bbox": [ + 0.531, + 0.777, + 0.885, + 0.806 + ], + "angle": 0, + "content": "Table 12: Bamboogle Test Results (Additional Result)" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.832, + 0.491, + 0.912 + ], + "angle": 0, + "content": "Interestingly, our findings suggest that initializing from a model finetuned on 4K data does not consistently outperform initialization from a model finetuned on only 400 data points. In the BFCL benchmark, we even observe cases where perfor" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.832, + 0.885, + 0.913 + ], + "angle": 0, + "content": "mance drops below that of the raw instruct model. This counterintuitive result may stem from overfitting during the SFT phase, which could restrict the model's ability to explore during RL training and lead to poorer generalization on held-out tasks." + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_13xxx/2504.13958/a08029cb-87cf-4923-9173-7614aa70a4ec_origin.pdf b/data/2025/2504_13xxx/2504.13958/a08029cb-87cf-4923-9173-7614aa70a4ec_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..28e8dab28b13f3bf6943873c73926816e2f7ef68 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/a08029cb-87cf-4923-9173-7614aa70a4ec_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:00ac35b687b54c9272b7cffef011659720b5658fd7beed72c9fb3f0704a6650d +size 2405904 diff --git a/data/2025/2504_13xxx/2504.13958/full.md b/data/2025/2504_13xxx/2504.13958/full.md new file mode 100644 index 0000000000000000000000000000000000000000..8a01b8bd853d0235879335f864ba3c69b5ebd565 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/full.md @@ -0,0 +1,704 @@ +# ToolRL: Reward is All Tool Learning Needs + +# Cheng Qian, Emre Can Acikgoz, Qi He, Hongru Wang, Xiusi Chen, Dilek Hakkani-Tür, Gokhan Tur, Heng Ji + +University of Illinois Urbana-Champaign + +{chengq9, hengji}@illinois.edu + +# Abstract + +Current Large Language Models (LLMs) often undergo supervised fine-tuning (SFT) to acquire tool use capabilities. However, SFT struggles to generalize to unfamiliar or complex tool use scenarios. Recent advancements in reinforcement learning (RL), particularly with R1-like models, have demonstrated promising reasoning and generalization abilities. Yet, reward design for tool use presents unique challenges: multiple tools may be invoked with diverse parameters, and coarse-grained reward signals, such as answer matching, fail to offer the fine-grained feedback required for effective learning. In this work, we present the first comprehensive study on reward design for tool selection and application tasks within the RL paradigm. We systematically explore a wide range of reward strategies, analyzing their types, scales, granularity, and temporal dynamics. Building on these insights, we propose a principled reward design tailored for tool use tasks and apply it to train LLMs using Group Relative Policy Optimization (GRPO). Empirical evaluations across diverse benchmarks demonstrate that our approach yields robust, scalable, and stable training, achieving a $17\%$ improvement over base models and a $15\%$ gain over SFT models. These results highlight the critical role of thoughtful reward design in enhancing the tool use capabilities and generalization performance of LLMs. All the code are released to facilitate future research. 
$^{1}$

# 1 Introduction

Recent advances in Large Language Models (LLMs) have showcased remarkable capabilities in complex reasoning tasks (Kumar et al., 2025). Among the techniques that have significantly contributed to this progress, Reinforcement Learning (RL) has emerged as a powerful paradigm, enabling LLMs to develop emergent capabilities such as self-reflection, self-correction, and long-horizon planning (Guo et al., 2025; Team et al., 2025). These capabilities have been instrumental in the success of models like o1 and R1, particularly in mathematical and logical reasoning domains (Qin et al., 2024a; Huang et al., 2024; Li et al., 2025b; Kang et al., 2025).

![](images/5f522180c398c809905b7e102f535a0a6044a6775b228ce7b2816e62b8244824.jpg)
Figure 1: SFT on distilled deep-thinking trajectories suffers from overthinking and limited generalization. (Task goal: irrelevant tool detection, i.e., the LLM should reject inappropriate tools. For the query "What's the distance between San Francisco and Los Angeles in kilometers?" with only a `get_date` tool available, the SFT model distilled from R1 long thoughts over-interprets the tool's purpose and calls it anyway, while the GRPO-trained RL model recognizes that the tool is unsuitable, declines to call it, and suggests a distance calculator or mapping service instead.)

Beyond traditional reasoning tasks, an increasingly important area is Tool-Integrated Reasoning (TIR). TIR involves LLMs interacting with external tools, such as search engines (Jin et al., 2025; Zheng et al., 2025), calculators (Chen et al., 2023b; Qin et al., 2023), or code interpreters (Gou et al., 2023; Liao et al., 2024), in a multi-step, feedback-driven loop to arrive at solutions. TIR is particularly important because it addresses core limitations of LLMs, such as outdated knowledge, calculation inaccuracy, and shallow reasoning. By integrating external tools that offer real-time access and specialized capabilities, TIR enables models to tackle complex tasks in a more grounded and goal-directed way.

![](images/d9d1c4a75b58af36c4b5eb4342807cfac4846cca8d8a8fc7880fdfaabb685a3c.jpg)

![](images/5420e28d1d217f54de626224d6cf029c9d3c8b75190b3803174eadbd95a48981.jpg)

![](images/52af26f9a24c98cd2e0c9763ebf10c2cc361be5015d3ce094e8fa5770096f803.jpg)
Figure 2: Main results (left) and reward trends over training steps for GRPO Cold Start across four models (right).
GRPO Cold Start, equipped with our proposed reward design, consistently achieves the highest performance, with reward curves showing a rapid increase during training.

![](images/f9ea052946be1f03ba66dd491116a8bea9bad8a566a674459368a7eceb182187.jpg)

![](images/c3a015e002e06dde8110a53dbf637aed44e0ebca22ae6f9b69351fadc73cf144.jpg)

Unlike textual reasoning, which primarily involves deduction and inference from static text, TIR additionally demands the model's ability to select appropriate tools, interpret intermediate outputs, and adaptively refine its trajectory on the fly. These dynamic and interactive reasoning skills position TIR at the core of the emerging paradigm of LLMs-as-agents. As such, TIR enables a wide range of applications, including scientific discovery (Roohani et al., 2024; Inoue et al., 2024), research automation (Baek et al., 2024; Wang et al., 2024), embodied task completion (Zhang et al., 2023; Huang et al., 2023), and everyday decision-making (Ye et al., 2023; Zhai et al., 2024).

Training LLMs for TIR tasks has predominantly relied on Supervised Fine-Tuning (SFT), wherein existing approaches typically generate these integrated reasoning steps offline, followed by subsequent SFT on these trajectories (Chen et al., 2023a; Zeng et al., 2024; Chen et al., 2024; Acikgoz et al., 2025). While SFT is effective to some extent, it struggles with generalization, exploration, and adaptability (Chu et al., 2025; Guo et al., 2025). As illustrated in Figure 1, a model trained with SFT on deep-thinking trajectories over-interprets the tool and fails to reject the inappropriate tool, merely imitating cues like "but wait" without engaging in genuine deep thinking. As such, SFT often fails to capture the strategic flexibility needed for optimal tool use, particularly in open-ended or multi-step settings. This motivates a fundamental research question: Can RL-based training methods better equip LLMs with agentic tool-using capabilities, and if so, what is the optimal RL design for TIR?

Recent efforts such as Search-R1 (Jin et al., 2025) and TORL (Li et al., 2025b) have begun to explore this direction. However, their focus is narrow: either constrained to search tools in question answering settings or code tools in math problem-solving. In contrast, our work aims to study RL-based training for general-purpose tool selection and application, across diverse and complex tool sets with different task types.

For an RL algorithm to be effective, a well-designed reward is essential. Unlike math tasks with a single correct answer, Tool-Integrated Reasoning (TIR) tasks introduce multiple layers of complexity: they often involve multi-step interactions where each turn may require invoking multiple tools, each with carefully specified parameters. Designing effective reward signals to guide learning through this complexity remains an open and underexplored challenge. In this paper, we focus on the problem of reward design for TIR and propose a principled, generalizable framework that can be applied across various RL algorithms. While our reward design is algorithm-agnostic by nature, we empirically demonstrate its effectiveness using both Group Relative Policy Optimization (GRPO) (Shao et al., 2024) and Proximal Policy Optimization (PPO) (Schulman et al., 2017), showcasing its versatility and impact on improving tool use performance.

We begin by formalizing the TIR task and outlining general principles for effective reward design.
Building on this foundation, we show how RL algorithms can be leveraged to train LLMs for robust and context-aware tool selection and application. Empirical results demonstrate that our approach outperforms base models by $17\%$ and SFT models by $15\%$ across multiple tool use and QA benchmarks. Moreover, the trained model exhibits strong generalization to unseen scenarios and task objectives, along with emergent behaviors such as proactiveness and metacognitive reasoning.

To identify optimal reward strategies, we next systematically explore a broad spectrum of reward configurations across four key dimensions: (1) reward type (what aspect to reward), (2) reward scale (how much to reward), (3) reward granularity (how detailed the reward signal is), and (4) reward dynamics (how rewards evolve over time). Through extensive experiments, we identify reward designs that best align with agentic tool use and uncover insights into what makes a reward "useful" for tool-invoking LLMs. We summarize the core insights we derive as follows:

- Longer reasoning traces are not inherently better, and length rewards can degrade performance.
- Dynamic reward scaling helps models transition smoothly from simple to complex behaviors.
- Fine-grained reward decomposition leads to more stable and effective learning.

We also summarize the overall contributions of our paper as follows:

- We present the first systematic study on RL-based training for general-purpose tool selection and application in LLMs.
- We propose a principled reward design framework tailored for TIR and validate its effectiveness through RL algorithms including GRPO.
- We conduct extensive experiments analyzing the effects of various reward strategies and distill actionable insights for future research on LLM-agent training.

This work pioneers the application of RL to general TIR and provides the first empirical roadmap for reward design in TIR, paving the way toward more capable and autonomous LLM agents.

# 2 Related Work

Tool-Integrated Reasoning of LLMs. Tool-integrated reasoning (TIR) has emerged as a promising approach to enhance the capabilities of LLMs. Early studies introduced the concept of equipping LLMs with external tools to overcome their inherent limitations (Schick et al., 2023; Qin et al., 2024b; Yao et al., 2023), such as program executors (Chen et al., 2022) and search engines (Vu et al., 2023). To systematically assess these enhanced capabilities, several benchmarks have been proposed to evaluate tool use performance across various dimensions, including API selection, argument generation, and generalization (Qin et al., 2024c; Patil et al., 2023; Qian et al., 2024a). Building on this foundation, subsequent research has focused on constructing high-quality tool use datasets (Liu et al., 2024; Qian et al., 2025), enabling models to autonomously create and invoke tools (Qian et al., 2023, 2024b), and applying these techniques to problems spanning different modalities (Shen et al., 2025) and specialized domains (Ling et al., 2023). More recently, reinforcement learning (RL) has been explored as an effective framework to further improve TIR, demonstrating success in tasks such as information retrieval (Jin et al., 2025) and math computation (Li et al., 2025b). These advances collectively highlight the growing potential of tool-augmented LLMs for general-purpose reasoning in open-domain settings.

Exploration of RL in LLMs.
Previous work has primarily relied on supervised fine-tuning (SFT) with carefully curated datasets to enhance LLM performance in tool use (Schick et al., 2023; Qin et al., 2024c). Recently, reinforcement learning (RL) has gained traction as a more scalable and generalizable training paradigm. The development of RL methods for LLMs has evolved from reinforcement learning from human feedback (RLHF) (Kaufmann et al., 2023) and proximal policy optimization (PPO) (Schulman et al., 2017) to more advanced techniques such as direct preference optimization (DPO) (Rafailov et al., 2023), SimPO (Meng et al., 2024), and group relative policy optimization (GRPO) (Shao et al., 2024). Extensions like dynamic sampling policy optimization (DAPO) (Yu et al., 2025) and the more recent value-based augmented proximal policy optimization (VAPO) (Yuan et al., 2025) further improve training stability and efficiency.

Among these, GRPO (Shao et al., 2024) is specifically designed for LLMs, replacing the traditional critic with a group-based evaluation strategy. It has shown strong performance in enhancing reasoning abilities across a range of tasks, including mathematical problem solving (Shao et al., 2024; Xie et al., 2025), search engine interaction (Jin et al., 2025; Song et al., 2025), and code generation (Li et al., 2025b). Beyond task variety, recent studies have analyzed the influence of dataset scale (Li et al., 2025a) and GRPO's effectiveness in smaller model settings (Dang and Ngo, 2025). GRPO's flexible reward function enables adaptation to diverse objectives, such as assigning weights to subtasks (Yu et al., 2024) or constraining tool use frequency (Li et al., 2025b). In this work, we extend GRPO to enhance general tool use capabilities, improving LLMs' ability to select and interact with external tools across a wide range of scenarios.

# 3 Method

Supervised fine-tuning (SFT), as illustrated in Figure 1, often suffers from overfitting to certain patterns and constrains the model's ability to learn optimal strategies for tool use. To address this, we introduce a reinforcement learning (RL) approach for enhancing tool-integrated reasoning (TIR) in LLMs. In this section, we begin by defining the TIR task (Section 3.1), followed by our customized rollout strategy (Section 3.2) and reward design (Section 3.3). These components are then integrated into the Group Relative Policy Optimization (GRPO) framework (Shao et al., 2024) to guide model training on general TIR tasks (Section 3.4).

# 3.1 Task Definition

Tool-Integrated Reasoning (TIR) is the process of incorporating external tools into the reasoning trajectory of an LLM to solve a user task. A typical TIR trajectory involves multiple tool invocations over several reasoning steps, with the final outcome determined by the cumulative success of these intermediate decisions.

Formally, given a tool set $\mathcal{T} = \{t_1, t_2, \dots, t_n\}$ containing $n$ available tools, and a user query $Q$, the reasoning trajectory up to step $k$ is denoted as:

$$
s_{k} = (r_{1}, \mathcal{T}_{1}, o_{1}), (r_{2}, \mathcal{T}_{2}, o_{2}), \ldots, (r_{k}, \mathcal{T}_{k}, o_{k}),
$$

where $r_i$ denotes the model's natural language reasoning at step $i$, $\mathcal{T}_i \subseteq \mathcal{T}$ denotes the set of tool calls invoked at step $i$, and $o_i$ denotes the observation received after executing tools in $\mathcal{T}_i$, possibly including both environment and user feedback.
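To make this structure concrete, the following minimal Python sketch shows one way to represent a TIR trajectory; the class and field names are illustrative choices for exposition, not part of any released implementation:

```python
from dataclasses import dataclass

@dataclass
class ToolCall:
    name: str         # tool name, e.g. "get_distance" (hypothetical)
    parameters: dict  # parameter name -> parameter content

@dataclass
class TIRStep:
    reasoning: str              # r_i: the model's natural-language thought
    tool_calls: list[ToolCall]  # T_i: tool calls issued at this step (may be empty)
    observation: str            # o_i: environment / user feedback after execution

# s_k is simply the list of steps taken so far; the policy maps (Q, the tool
# set, s_k) to the next reasoning step r_{k+1} and tool-call set T_{k+1}.
Trajectory = list[TIRStep]
```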
At each step $k + 1$, the model must generate the next reasoning step $r_{k + 1}$, select a set of tools $\mathcal{T}_{k + 1} \subseteq \mathcal{T}$, and formulate a grounded tool call (i.e., a parameterized invocation of each tool) to make progress toward solving $Q$.

The model's policy is defined as $\pi : s_k \to (r_{k+1}, \mathcal{T}_{k+1})$, where the model's objective at each step is to select a tool set $\mathcal{T}_{k+1}$ that maximizes the immediate reward:

$$
\mathcal{T}_{k + 1}^{*} = \arg \max_{\mathcal{T}_{k + 1} \subseteq \mathcal{T}} R(s_{k}, \mathcal{T}_{k + 1}, o_{k + 1}),
$$

where $R(\cdot)$ represents the reward function that evaluates progress made by invoking the tools in $\mathcal{T}_{k + 1}$.

While the immediate reward at each step is maximized, the model's policy is implicitly optimized to maximize the cumulative reward over the entire trajectory, formulated as:

$$
\max_{\pi} \mathbb{E}_{\pi} \left[ \sum_{k = 1}^{K} R\left(s_{k}, \mathcal{T}_{k + 1}, o_{k + 1}\right) \right].
$$

This formulation is valid because our training data includes ground truth tool calls at each step, allowing step-wise reward signals to guide multi-step success. Unlike QA tasks that focus solely on the final answer, tool selection and application tasks provide dense intermediate feedback. Moreover, we later demonstrate that our method enables the model to generalize to settings where tool calls are free-form and only the final outcome matters. Therefore, our task setting encourages the model to optimize tool use at each step while aligning with the overall task goal.

# 3.2 TIR Rollout

To enable the model to autonomously generate reasoning traces and tool calls, we utilize a system prompt as shown in Figure 4 during rollout. The Tool List placeholder denotes the tool set $\mathcal{T}$, which contains all tools available for invocation. We indicate in the instruction that the LLM should use the special tokens `<think>`, `<tool_call>`, and `<response>` to mark its thoughts, tool calls, and responses in the output.

As illustrated in Figure 3, when the model output includes `<tool_call>`, we automatically parse the tool calls into individual invocations using the model-predicted parameters. The outputs from executions are then inserted into the observation (`<obs>`) field and appended to the dialogue history, whose format is shown in Figure 12, serving as the model's interaction trajectory. Similarly, if the output contains `<response>`, the corresponding response is parsed and appended to the dialogue history.

![](images/68ae308fe4201a034cd5d0b2d3ae170be90697a5c0d2f6f3f221fa3f622dbd0d.jpg)
Figure 3: Illustration of TIR rollout and calculation of format and correctness reward.

It is important to note that `<tool_call>` and `<response>` are not mutually exclusive; they may co-occur within a single output. The user's initial query $Q$ is placed in the Initial User Input placeholder, and any subsequent user inputs are also appended to the dialogue history when present.
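Concretely, the parsing step can be sketched as follows; the tag names mirror the output format in Figure 4, while the function itself and its lenient error handling are illustrative simplifications rather than the exact rollout code:

```python
import json
import re

# Extract <think>, <tool_call>, and <response> fields from one model output.
THINK_RE = re.compile(r"<think>(.*?)</think>", re.DOTALL)
TOOL_RE = re.compile(r"<tool_call>(.*?)</tool_call>", re.DOTALL)
RESP_RE = re.compile(r"<response>(.*?)</response>", re.DOTALL)

def parse_rollout(output: str):
    thought = THINK_RE.search(output)
    tool_calls = []
    for block in TOOL_RE.findall(output):
        # Each non-empty line inside <tool_call> is assumed to hold one
        # JSON tool invocation, as specified in the system prompt.
        for line in filter(None, (l.strip() for l in block.splitlines())):
            try:
                tool_calls.append(json.loads(line))
            except json.JSONDecodeError:
                pass  # malformed calls simply earn no correctness reward
    response = RESP_RE.search(output)
    return (
        thought.group(1).strip() if thought else None,
        tool_calls,
        response.group(1).strip() if response else None,
    )
```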
# 3.3 Reward Design

Rule-based reward mechanisms have demonstrated strong empirical performance and are commonly employed. In our training, we similarly adopt a reward formulation that combines structural and correctness-based components, in line with prior works (Jin et al., 2025; Li et al., 2025b; Xie et al., 2025). Specifically, the format reward assesses whether the model output adheres to the expected structure including thoughts, tool calls, and responses, while the correctness reward evaluates the accuracy of tool invocations. Formally, the overall reward $\mathcal{R}_{\mathrm{final}}(\cdot)$ is decomposed into two components, $\mathcal{R}_{\mathrm{format}} + \mathcal{R}_{\mathrm{correct}}$, each described in detail below:

Format Reward. The format reward $\mathcal{R}_{\mathrm{format}} \in \{0, 1\}$ checks whether the model output contains all required special tokens in the correct order as specified by the ground truth:

$$
\mathcal{R}_{\mathrm{format}} = \begin{cases} 1, & \text{if all required fields appear and are in the correct order} \\ 0, & \text{otherwise} \end{cases}
$$

Correctness Reward. The correctness reward $\mathcal{R}_{\mathrm{correct}} \in [-3, 3]$ evaluates predicted tool calls $P = \{P_1, \dots, P_m\}$ against ground-truth calls $G = \{G_1, \dots, G_n\}$. It includes three components:

- Tool Name Matching:

$$
r_{\mathrm{name}} = \frac{|N_{G} \cap N_{P}|}{|N_{G} \cup N_{P}|} \in [0, 1]
$$

where $N_{G}$ and $N_{P}$ are the sets of tool names extracted from the ground-truth and predicted tool calls, respectively.

- Parameter Name Matching:

$$
r_{\mathrm{param}} = \sum_{G_{j} \in G} \frac{|\mathrm{keys}(P_{G}) \cap \mathrm{keys}(P_{P})|}{|\mathrm{keys}(P_{G}) \cup \mathrm{keys}(P_{P})|} \in [0, |G|]
$$

where $\mathrm{keys}(P_{G})$ and $\mathrm{keys}(P_{P})$ represent the parameter names of the ground-truth call $G_j$ and its matched predicted call, respectively.

- Parameter Content Matching:

$$
r_{\mathrm{value}} = \sum_{G_{j} \in G} \sum_{k \in \mathrm{keys}(G_{j})} \mathbb{1}\left[P_{G}[k] = P_{P}[k]\right] \in \left[0, \sum_{G_{j} \in G} |\mathrm{keys}(G_{j})|\right]
$$

where $P_{G}[k]$ and $P_{P}[k]$ represent the values of parameter $k$ in the ground-truth and matched predicted tool calls, respectively.

- The total match score for each matching is:

$$
r_{\mathrm{match}} = r_{\mathrm{name}} + r_{\mathrm{param}} + r_{\mathrm{value}} \in [0, S_{\max}]
$$

where $S_{\max} = 1 + |G| + \sum_{G_j \in G} |\mathrm{keys}(G_j)|$ denotes the maximum possible score.

The total score is computed by finding the optimal matching between $P$ and $G$ that maximizes the total match score:

$$
\mathcal{R}_{\mathrm{correct}} = 6 \cdot \frac{R_{\max}}{S_{\max}} - 3 \in [-3, 3]
$$

where $R_{\max}$ denotes the total match score under the optimal matching. The final correctness reward $\mathcal{R}_{\mathrm{correct}}$ is the normalized reward for the matching process. We empirically set the reward scale within the range of $[-3, 3]$, with more analysis and ablations of reward scale presented in Section 5.

The final reward value $\mathcal{R}_{\mathrm{final}}$ is then derived as the sum of $\mathcal{R}_{\mathrm{format}}$ and $\mathcal{R}_{\mathrm{correct}}$:

$$
\mathcal{R}_{\mathrm{final}} = \mathcal{R}_{\mathrm{format}} + \mathcal{R}_{\mathrm{correct}} \in [-3, 4]
$$

Unlike prior works that often rely on binary or overly simplified reward signals, our design captures the nuanced structure of tool calls by evaluating multiple interdependent components including tool names, parameter schemas, and parameter values. This fine-grained formulation better reflects the complexity of real-world tool use, where correctness cannot be reduced to a single binary criterion. We further validate the impact of this design through comprehensive analysis in Section 5.

Overall, our reward design ensures a balanced and interpretable evaluation signal by explicitly separating structural compliance from semantic correctness. By aligning rewards with both format adherence and fine-grained tool call accuracy, the model is guided to produce outputs that are not only syntactically valid but also semantically faithful, which is crucial for downstream tool execution and final task success.

# System Prompt for Training

You are a helpful dialogue assistant capable of leveraging tool calls to solve user tasks and provide structured chat responses.

# Available Tools

In your response, you can use the following tools:

{{ToolList}}

# Steps for Each Turn

1. Think: Recall relevant context and analyze the current user goal.
2. Decide on Tool Usage: If a tool is needed, specify the tool and its parameters.
3. Respond Appropriately: If a response is needed, generate one while maintaining consistency across user queries.

# Output Format

```txt
<think> Your thoughts and reasoning </think>
<tool_call>
{"name": "Tool name", "parameters": {"Parameter name": "Parameter content", "... ...": "... ..."}}
{"name": "... ...", "parameters": {"... ...": "... ...", "... ...": "... ..."}}
...
</tool_call>
<response> AI's final response </response>
```

# Important Notes

1. You must always include the `<think>` field to outline your reasoning. Provide at least one of `<tool_call>` or `<response>`. Decide whether to use `<tool_call>` (possibly multiple times), `<response>`, or both.
2. You can invoke multiple tool calls simultaneously in the `<tool_call>` fields. Each tool call should be a JSON object with a "name" field and a "parameters" field containing a dictionary of parameters. If no parameters are needed, leave the "parameters" field an empty dictionary.
3. Refer to the previous dialogue records in the history, including the user's queries, previous `<think>`, `<tool_call>`, `<response>`, and any tool feedback noted as `<obs>` (if it exists).

Figure 4: The system prompt used for TIR's rollout.
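For concreteness, the sketch below shows how these terms can combine in code. It is an illustrative simplification (exhaustive pairing with empty-call padding, and parameter credit granted only to name-matched pairs), not the exact scoring code we release:

```python
import itertools

def correctness_reward(gt_calls: list, pred_calls: list) -> float:
    """Sketch of R_correct in [-3, 3] for one turn.

    Each call is a dict like {"name": str, "parameters": dict}.
    """
    # Tool name matching: Jaccard similarity between the two name sets.
    n_g = {c["name"] for c in gt_calls}
    n_p = {c["name"] for c in pred_calls}
    r_name = len(n_g & n_p) / len(n_g | n_p) if (n_g | n_p) else 1.0

    def pair_score(gt, pred):
        # Parameter name matching (Jaccard over keys) plus exact value matching;
        # a pair only earns credit when it names the same tool (our simplification).
        if gt["name"] != pred["name"]:
            return 0.0
        g_keys, p_keys = set(gt["parameters"]), set(pred["parameters"])
        r_param = len(g_keys & p_keys) / len(g_keys | p_keys) if (g_keys | p_keys) else 1.0
        r_value = sum(pred["parameters"].get(k) == v for k, v in gt["parameters"].items())
        return r_param + r_value

    # Pad predictions with empty calls so every ground-truth call has a partner,
    # then search all pairings; turns contain few calls, so this stays cheap.
    pool = pred_calls + [{"name": "", "parameters": {}}] * max(0, len(gt_calls) - len(pred_calls))
    best = max(
        (sum(pair_score(g, p) for g, p in zip(gt_calls, perm))
         for perm in itertools.permutations(pool, len(gt_calls))),
        default=0.0,
    )
    s_max = 1 + len(gt_calls) + sum(len(g["parameters"]) for g in gt_calls)
    return 6.0 * (r_name + best) / s_max - 3.0
```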
# 3.4 RL Training with GRPO

To tune the model with structured rewards, we employ GRPO, a variant of PPO that introduces advantage normalization within grouped samples. This normalization helps stabilize training by reducing variance across samples that share a common input context. Let $\pi_{\theta}$ represent the current policy.

Normalized Advantage Across Query Groups. For each query $Q$, its responses derived from the rollout form a group $G_{Q}$ consisting of multiple responses and their corresponding reward values:

$$
G_{Q} = \left\{A, (s_{1}, r_{1}), (s_{2}, r_{2}), \dots, (s_{n}, r_{n})\right\}
$$

where $A$ denotes the ground-truth annotation for $Q$, and each reward $r_i$ is computed as the sum of the format and correctness rewards associated with response $s_i$, i.e., $r_i = \mathcal{R}_{\mathrm{format}}(s_i, A) + \mathcal{R}_{\mathrm{correct}}(s_i, A)$. For each group, we calculate the mean and standard deviation of the rewards:

$$
\mu_{Q} = \frac{1}{n} \sum_{i = 1}^{n} r_{i}, \quad \sigma_{Q} = \sqrt{\frac{1}{n} \sum_{i = 1}^{n} (r_{i} - \mu_{Q})^{2}}
$$

Then, for each sample $s_i$ in the group, we define the normalized advantage:

$$
A_{i}(s_{i} \mid Q) = \frac{r_{i} - \mu_{Q}}{\sigma_{Q} + \eta}
$$

where $\eta$ is a constant to avoid division by zero.

Policy Optimization Objective. The policy $\pi_{\theta}$ is optimized using the standard clipped PPO objective, adapted with our group-wise normalized advantages:

$$
J_{\mathrm{GRPO}}(\theta) = \mathbb{E}_{Q \sim \mathcal{D}}\, \mathbb{E}_{s_{i} \sim \pi_{\theta}} \left[ \min \left( \frac{\pi_{\theta}(s_{i} \mid Q)}{\pi_{\mathrm{old}}(s_{i} \mid Q)} A_{i}(s_{i} \mid Q),\ \operatorname{clip}\left( \frac{\pi_{\theta}(s_{i} \mid Q)}{\pi_{\mathrm{old}}(s_{i} \mid Q)},\, 1 - \epsilon,\, 1 + \epsilon \right) A_{i}(s_{i} \mid Q) \right) \right]
$$

Unlike the original GRPO formulations, we omit the KL penalty term against a reference model. This design choice encourages the model to more freely adapt its behavior to our custom response format and structured reward signals. In practice, we observe that this leads to faster convergence and comparable performance, while also simplifying the training pipeline.

Overall, this objective guides the policy to generate structurally consistent and semantically accurate tool calls, while group-wise normalization mitigates reward variance across queries, leading to more stable and sample-efficient alignment with task-specific response requirements.
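A compact sketch of the group normalization and the clipped objective above, written with PyTorch tensors (the function names and the $\epsilon = 0.2$ default are our illustrative choices):

```python
import torch

def group_advantages(rewards: torch.Tensor, eta: float = 1e-6) -> torch.Tensor:
    """Group-wise normalized advantages; `rewards` has shape (n,) and holds
    R_format + R_correct for the n rollouts sampled from the same query Q."""
    mu = rewards.mean()
    sigma = ((rewards - mu) ** 2).mean().sqrt()  # population std, as in the formula
    return (rewards - mu) / (sigma + eta)

def grpo_loss(logp_new: torch.Tensor, logp_old: torch.Tensor,
              advantages: torch.Tensor, eps: float = 0.2) -> torch.Tensor:
    """Clipped surrogate objective, negated for gradient descent. Note the
    absence of any KL penalty term, mirroring the design choice above."""
    ratio = (logp_new - logp_old).exp()
    clipped = ratio.clamp(1.0 - eps, 1.0 + eps)
    return -torch.minimum(ratio * advantages, clipped * advantages).mean()
```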
# 4 Experiments

# 4.1 Training Dataset

To support robust tool learning through RL, we construct a mixed dataset spanning diverse tool use scenarios:

- ToolACE (Liu et al., 2024): A general tool use dataset where the model learns when to invoke tools versus respond directly, improving decision-making in multi-step interactions.
- Hammer (Masked) (Lin et al., 2024): A subset of Hammer with randomized tool and parameter names, forcing the model to rely on descriptions rather than memorized labels, thus enhancing generalization and reducing overfitting to certain tools.
- xLAM (Zhang et al., 2024): A compositional dataset requiring one or multiple tool calls per turn, encouraging the model to reason about tool dependencies and to actively plan diverse tool-calling actions.

For RL training, we sample 2K examples from ToolACE and 1K each from Hammer and xLAM, creating a balanced dataset spanning diverse levels of complexity and tool use. Multi-step trajectories are decomposed into single-step instances, with prior dialogue history injected into the user prompt (as shown in Figure 12) to preserve context. This setup encourages strategic exploration and teaches the model to select and apply tools appropriately within each step. Please see Appendix B for more details and justifications.

# 4.2 Experiment Settings

Training. We conduct all RL experiments using the veRL framework (Sheng et al., 2024), adopting the GRPO algorithm detailed in the previous section. For each training step, we sample a batch of 512 and generate 4 responses per query, training for 15 epochs in total (see Appendix B for full configuration details). To encourage broader policy exploration, we remove KL regularization and apply a generation temperature of 1.0. We initialize our models with the Qwen-2.5-Instruct (Team, 2024) and Llama-3.2-Instruct (Dubey et al., 2024) series, which are further tuned under the GRPO objective with our customized reward design.

Evaluation.
We evaluate our approach on the Berkeley Function Call Leaderboard (BFCL) (Patil et al., 2024), a comprehensive benchmark that spans a diverse set of challenges, including single-step reasoning, multi-step tool use, real-time execution, irrelevant tool rejection, simultaneous multi-tool selection, and multi-tool application$^{2}$. In addition, we present results on API-Bank (Li et al., 2023), a three-level evaluation framework comprising 73 diverse and complex API tools. It assesses an LLM's ability to select and apply tools through natural multi-turn dialogues, across three levels of difficulty. We also evaluate on a representative QA benchmark, Bamboogle (Press et al., 2022), which comprises a variety of question-answering tasks where performance is measured based on the final answer accuracy rather than the correctness of tool use. This broad coverage makes our evaluation setting effective for assessing real-world LLM tool use proficiency. All results are reported in terms of accuracy.

Baselines. We compare our approach against several baselines to better isolate the effects of GRPO training: (1) Raw Instruct Model: the original model without any additional fine-tuning or RL, evaluated using the same prompts. (2) SFT on RL Data: the instruct model fine-tuned using the same 4K / selected 400 data points as the RL training set, providing a comparison point to assess whether GRPO training outperforms standard SFT. (3) GRPO on SFT Model: GRPO is applied to a model that has already undergone SFT on the selected 400 data points. This setup allows us to evaluate the impact of initializing GRPO with a format-aware model, in contrast to starting from the raw instruct model in a cold start manner. (4) PPO: We also include the standard PPO setting as a baseline to evaluate whether our reward design is effective beyond GRPO. We report results for both a cold start PPO model and a PPO model initialized with SFT, using the same hyperparameters as in the GRPO setup for a fair comparison. Please refer to Appendix B for more details and justifications.

# 4.3 Results

Main Results. We report BFCL and API-Bank results in Table 1 and Table 2, respectively. Our GRPO method, trained from scratch on the Qwen2.5-Instruct series, generally outperforms other baselines, achieving ${\sim}10\%$ absolute gains over SFT trained on the same data volume. In contrast, LLaMA-3.2-Instruct shows less improvement, possibly due to the model's lower adaptability to GRPO-style generalization. Nevertheless, it remains competitive and outperforms most baselines on API-Bank.

![](images/d8dc3638bde2b89fa52a2572854cae623f195a7bdeaf5d9b387e540c7b3b470f.jpg)
(a) Format Reward

![](images/86ab19a0b1d508548569e0402244154029f504b76c0052ae772036ac379e63d7.jpg)
(b) Correctness Reward

Figure 5: Format (left) and correctness (right) reward trends across training steps for Qwen2.5-3B-Instruct with different model initialization strategies.

![](images/7ac0f46c2122b5016b913482eafa9455530e12499baa0d34caae98e0d8b3c074.jpg)
(a) Format Reward

![](images/94ced1cd69372f6e86341575a504e8b4cc1f2bf372c3c145a6fdae3920f6dcdb.jpg)
(b) Correctness Reward

Figure 6: Format (left) and correctness (right) reward trends across training steps for Qwen2.5-3B-Instruct with different RL strategies (GRPO vs. PPO).

SFT Initialization Impacts. Interestingly, GRPO also improves models initialized with limited SFT, often outperforming full-scale SFT trained on 10 times more data.
However, this setup still underperforms compared to cold start GRPO. We hypothesize that SFT initialization leads to memorization and overfitting, which diminishes GRPO's effectiveness in generalization. As shown in Figure 5, SFT-initialized models achieve higher training rewards due to distributional alignment between SFT and RL data, but empirically generalize worse on the two benchmarks. This further highlights that higher training rewards do not necessarily translate to better generalization.

Reward Design on PPO. We also evaluate PPO under both cold start and SFT-initialized settings to examine the effectiveness of our reward design. The results show that while PPO with a cold start can outperform SFT in some cases, it tends to be less stable across different model settings. In contrast, GRPO consistently achieves higher rewards even from a cold start, suggesting that our reward design is partially effective for PPO but works best in the GRPO framework. As shown in Figure 6, GRPO not only achieves higher correctness rewards but also gains format rewards more rapidly during training. Interestingly, PPO benefits from SFT initialization, generally yielding better results than a cold start, whereas GRPO performs better
| Model | Overall Acc | Non-Live AST Acc | Non-Live Exec Acc | Live Acc | Multi Turn Acc | Relevance Detection | Irrelevance Detection |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Qwen2.5-1.5B-Instruct (Raw) | 19.41% | 16.00% | 13.18% | 35.58% | 0.00% | 44.44% | 82.49% |
| Qwen2.5-1.5B-Instruct (SFT400) | 40.21% | 65.12% | 61.11% | 56.69% | 1.00% | 94.44% | 60.14% |
| Qwen2.5-1.5B-Instruct (SFT4k) | 40.67% | 59.94% | 59.84% | 59.31% | 1.00% | 88.89% | 71.34% |
| Qwen2.5-1.5B-Instruct (SFT400+PPO) | 42.95% | 77.65% | 69.75% | 55.73% | 1.88% | 100.00% | 48.40% |
| Qwen2.5-1.5B-Instruct (SFT400+GRPO) | 40.93% | 70.54% | 60.79% | 56.33% | 1.00% | 94.44% | 58.63% |
| Qwen2.5-1.5B-Instruct (PPO Cold Start) | 38.32% | 79.40% | 70.11% | 45.24% | 0.87% | 100.00% | 18.09% |
| Qwen2.5-1.5B-Instruct (Ours, GRPO Cold Start) | 46.20% | 77.96% | 76.98% | 60.73% | 2.25% | 100.00% | 56.44% |
| Qwen2.5-3B-Instruct (Raw) | 33.04% | 42.52% | 40.80% | 53.96% | 1.00% | 64.71% | 56.01% |
| Qwen2.5-3B-Instruct (SFT400) | 34.08% | 69.29% | 61.50% | 41.40% | 0.00% | 94.44% | 8.11% |
| Qwen2.5-3B-Instruct (SFT4k) | 41.97% | 62.85% | 54.73% | 59.17% | 0.75% | 77.78% | 75.12% |
| Qwen2.5-3B-Instruct (SFT400+PPO) | 45.80% | 78.29% | 71.09% | 58.76% | 5.12% | 94.12% | 54.70% |
| Qwen2.5-3B-Instruct (SFT400+GRPO) | 46.42% | 76.21% | 68.93% | 64.15% | 1.75% | 88.89% | 71.76% |
| Qwen2.5-3B-Instruct (PPO Cold Start) | 51.15% | 82.42% | 78.52% | 67.78% | 4.88% | 94.12% | 73.87% |
| Qwen2.5-3B-Instruct (Ours, GRPO Cold Start) | 52.98% | 81.58% | 79.43% | 73.78% | 3.75% | 88.24% | 84.85% |
| Qwen2.5-7B-Instruct (Raw) | 41.97% | 66.02% | 70.11% | 53.51% | 4.25% | 76.47% | 62.66% |
| Qwen2.5-7B-Instruct (SFT400) | 34.08% | 69.29% | 66.68% | 41.40% | 0.00% | 94.44% | 8.11% |
| Qwen2.5-7B-Instruct (SFT4k) | 36.53% | 45.15% | 53.50% | 57.13% | 0.75% | 72.22% | 72.32% |
| Qwen2.5-7B-Instruct (SFT400+PPO) | 42.02% | 83.90% | 72.62% | 51.84% | 0.25% | 100.00% | 29.66% |
| Qwen2.5-7B-Instruct (SFT400+GRPO) | 39.25% | 80.69% | 74.34% | 46.51% | 0.25% | 100.00% | 14.19% |
| Qwen2.5-7B-Instruct (PPO Cold Start) | 46.68% | 79.33% | 78.16% | 63.17% | 0.38% | 88.89% | 52.92% |
| Qwen2.5-7B-Instruct (Ours, GRPO Cold Start) | 58.38% | 86.17% | 78.25% | 74.90% | 18.12% | 83.33% | 76.68% |
| Llama-3.2-3B-Instruct (Raw) | 22.09% | 17.44% | 14.57% | 43.85% | 0.00% | 77.78% | 66.07% |
| Llama-3.2-3B-Instruct (SFT400) | 41.22% | 64.27% | 62.18% | 58.37% | 0.75% | 66.67% | 71.12% |
| Llama-3.2-3B-Instruct (SFT4k) | 44.16% | 65.42% | 67.02% | 63.04% | 1.38% | 77.78% | 78.25% |
| Llama-3.2-3B-Instruct (SFT400+PPO) | 41.62% | 68.10% | 69.88% | 52.98% | 3.00% | 94.12% | 56.29% |
| Llama-3.2-3B-Instruct (SFT400+GRPO) | 42.54% | 65.15% | 68.98% | 59.40% | 0.88% | 72.22% | 65.80% |
| Llama-3.2-3B-Instruct (PPO Cold Start) | 42.98% | 84.00% | 72.00% | 52.80% | 2.88% | 100.00% | 31.94% |
| Llama-3.2-3B-Instruct (Ours, GRPO Cold Start) | 44.10% | 74.38% | 75.18% | 56.86% | 1.37% | 94.44% | 62.23% |

Table 1: BFCL V3 Benchmark Results (Main Result)
| Model | Overall Acc | Level 1 | Level 2 | Level 3 |
| --- | --- | --- | --- | --- |
| Qwen2.5-1.5B-Instruct (Raw) | 30.65% | 28.32% | 35.82% | 35.11% |
| Qwen2.5-1.5B-Instruct (SFT400) | 53.60% | 57.14% | 50.75% | 44.27% |
| Qwen2.5-1.5B-Instruct (SFT4k) | 47.07% | 52.88% | 52.24% | 26.72% |
| Qwen2.5-1.5B-Instruct (SFT400+PPO) | 57.12% | 60.90% | 50.75% | 48.85% |
| Qwen2.5-1.5B-Instruct (SFT400+GRPO) | 61.31% | 64.16% | 58.21% | 54.20% |
| Qwen2.5-1.5B-Instruct (PPO Cold Start) | 40.54% | 44.61% | 31.34% | 32.82% |
| Qwen2.5-1.5B-Instruct (Ours, GRPO Cold Start) | 63.15% | 70.68% | 61.19% | 41.22% |
| Qwen2.5-3B-Instruct (Raw) | 51.59% | 59.65% | 32.84% | 36.64% |
| Qwen2.5-3B-Instruct (SFT400) | 52.76% | 59.65% | 50.75% | 32.82% |
| Qwen2.5-3B-Instruct (SFT4k) | 50.92% | 55.64% | 43.28% | 40.46% |
| Qwen2.5-3B-Instruct (SFT400+PPO) | 65.16% | 67.92% | 55.22% | 61.83% |
| Qwen2.5-3B-Instruct (SFT400+GRPO) | 62.48% | 68.67% | 58.21% | 45.80% |
| Qwen2.5-3B-Instruct (PPO Cold Start) | 57.62% | 64.66% | 59.70% | 35.11% |
| Qwen2.5-3B-Instruct (Ours, GRPO Cold Start) | 67.00% | 73.43% | 67.16% | 47.33% |
| Qwen2.5-7B-Instruct (Raw) | 62.48% | 70.68% | 49.25% | 44.27% |
| Qwen2.5-7B-Instruct (SFT400) | 50.59% | 55.89% | 50.75% | 34.35% |
| Qwen2.5-7B-Instruct (SFT4k) | 47.07% | 51.13% | 34.33% | 41.22% |
| Qwen2.5-7B-Instruct (SFT400+PPO) | 63.15% | 72.43% | 58.21% | 37.40% |
| Qwen2.5-7B-Instruct (SFT400+GRPO) | 54.10% | 61.40% | 52.24% | 32.82% |
| Qwen2.5-7B-Instruct (PPO Cold Start) | 61.64% | 68.67% | 44.78% | 48.85% |
| Qwen2.5-7B-Instruct (Ours, GRPO Cold Start) | 64.66% | 73.93% | 61.19% | 38.17% |
| Llama-3.2-3B-Instruct (Raw) | 40.54% | 44.86% | 29.85% | 32.82% |
| Llama-3.2-3B-Instruct (SFT400) | 52.76% | 60.65% | 35.82% | 37.40% |
| Llama-3.2-3B-Instruct (SFT4k) | 43.89% | 53.88% | 29.85% | 20.61% |
| Llama-3.2-3B-Instruct (SFT400+PPO) | 57.79% | 63.16% | 47.76% | 46.56% |
| Llama-3.2-3B-Instruct (SFT400+GRPO) | 56.78% | 63.60% | 41.79% | 43.51% |
| Llama-3.2-3B-Instruct (PPO Cold Start) | 55.78% | 60.65% | 41.79% | 48.09% |
| Llama-3.2-3B-Instruct (Ours, GRPO Cold Start) | 59.13% | 65.66% | 52.24% | 42.75% |

Table 2: API-Bank Test Results (Main Result)
| Model | Accuracy | Avg Num Tool Call |
| --- | --- | --- |
| Qwen2.5-1.5B-Instruct (Raw) | 20.8% | 0.61 |
| Qwen2.5-1.5B-Instruct (SFT400) | 24.8% | 0.78 |
| Qwen2.5-1.5B-Instruct (SFT4k) | 23.2% | 1.25 |
| Qwen2.5-1.5B-Instruct (SFT400+PPO) | 36.8% | 1.06 |
| Qwen2.5-1.5B-Instruct (SFT400+GRPO) | 38.4% | 0.96 |
| Qwen2.5-1.5B-Instruct (PPO Cold Start) | 23.2% | 2.38 |
| Qwen2.5-1.5B-Instruct (Ours, GRPO Cold Start) | 44.0% | 1.19 |
| Qwen2.5-3B-Instruct (Raw) | 52.0% | 1.77 |
| Qwen2.5-3B-Instruct (SFT400) | 54.4% | 0.86 |
| Qwen2.5-3B-Instruct (SFT4k) | 49.6% | 0.92 |
| Qwen2.5-3B-Instruct (SFT400+PPO) | 43.2% | 1.04 |
| Qwen2.5-3B-Instruct (SFT400+GRPO) | 56.8% | 0.99 |
| Qwen2.5-3B-Instruct (PPO Cold Start) | 40.0% | 1.14 |
| Qwen2.5-3B-Instruct (Ours, GRPO Cold Start) | 60.0% | 1.32 |
| Qwen2.5-7B-Instruct (Raw) | 69.6% | 1.42 |
| Qwen2.5-7B-Instruct (SFT400) | 28.8% | 3.71 |
| Qwen2.5-7B-Instruct (SFT4k) | 30.4% | 1.06 |
| Qwen2.5-7B-Instruct (SFT400+PPO) | 45.6% | 3.54 |
| Qwen2.5-7B-Instruct (SFT400+GRPO) | 29.6% | 3.70 |
| Qwen2.5-7B-Instruct (PPO Cold Start) | 48.0% | 1.25 |
| Qwen2.5-7B-Instruct (Ours, GRPO Cold Start) | 72.0% | 1.63 |
| Llama-3.2-3B-Instruct (Raw) | 34.4% | 1.25 |
| Llama-3.2-3B-Instruct (SFT400) | 44.0% | 0.98 |
| Llama-3.2-3B-Instruct (SFT4k) | 48.8% | 0.98 |
| Llama-3.2-3B-Instruct (SFT400+PPO) | 39.2% | 1.33 |
| Llama-3.2-3B-Instruct (SFT400+GRPO) | 45.6% | 1.00 |
| Llama-3.2-3B-Instruct (PPO Cold Start) | 29.6% | 1.42 |
| Llama-3.2-3B-Instruct (Ours, GRPO Cold Start) | 52.0% | 0.89 |
Table 3: Bamboogle Test Results (Main Result)

Generalization Studies. We evaluate the generalization ability of our trained model in two challenging settings: unfamiliar scenarios and novel task goals (both drawn from subsets of the BFCL benchmark). Specifically, we test the model's tool usage in unseen programming languages and its ability to detect irrelevant tools, neither of which was explicitly included during RL training or in the dataset. As shown in Figure 7, Qwen2.5-3B-Instruct, when trained from scratch with our GRPO-based reward design, consistently achieves the highest performance. Additionally, Table 4 presents two qualitative examples where the model proactively rejects inappropriate tool use: first by clarifying ambiguous intent, and second by opting to answer directly without tools.

![](images/edc4a0db668429fb699878570feac579cd2731eaea04e93c2eba09dd09e6b856.jpg)
(a) Unfamiliar Scenario

![](images/9da49ecd03180b3351e89de045a7633df7596f714deea3d3adb864ec3d0ad88d.jpg)
(b) Unfamiliar Goal

Figure 7: Qwen2.5-3B-Instruct's performance across unfamiliar programming language scenarios (left) and novel relevance detection task goals (right), evaluated under different training settings.

User: I would like to buy a movie ticket in San Jose at 11 o'clock in the night.
Tools Available:
1. Movies_1_BuyMovieTickets(movie_name, number_of_tickets, ...) $\rightarrow$ Buy tickets for a movie at a specific time and location.
2. Movies_1_FindMovies(location, ...) $\rightarrow$ Search movies by location, genre, or format.
3. Movies_1_GetTimesForMovie(movie_name, location, ...) $\rightarrow$ Get showtimes for a movie on a specific date and location.

Thought: The user wants to buy a movie ticket in San Jose at 11 PM. The available tool, Movies_1_BuyMovieTickets, allows us to purchase movie tickets with the required details such as movie name, number of tickets, show date, location, show time, and show type. We need to find the movie name and the show date. However, the user did not specify the movie name or the show date. We can infer the show date from the current date, but we need to know the movie name to proceed with the purchase.

Response: To assist in purchasing a movie ticket, I need the name of the movie and the specific date of the show. Could you please provide this information?

User: What is the slope of the line which is perpendicular to the line with the equation $y = 3x + 2$?
Tools Available:
1. find_critical_points(function, variable, range=[0.0, 3.4]) $\rightarrow$ Find critical points of a function with respect to a variable, optionally within a range.

Thought: The problem is asking for the slope of a line that is perpendicular to the line with the equation $y = 3x + 2$. The slope of the given line is 3. The slope of a line that is perpendicular to another line is the negative reciprocal of the original line's slope. So, the slope of the line perpendicular to $y = 3x + 2$ would be -1/3. The given tool find_critical_points is not relevant to finding the slope of a line and cannot be used to solve this problem.

Response: The slope of the line that is perpendicular to the line with the equation $y = 3x + 2$ is -1/3.

Table 4: Cases of Qwen2.5-3B-Instruct (GRPO Cold Start) generalizing to irrelevant tool detection goals.

These behaviors reflect emergent proactivity and metacognition, enhancing efficiency, reducing hallucinations, and signaling foundational agentic intelligence.
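The turn structure in these cases (think, then either call a tool or answer the user) can be read as a simple rollout loop. The sketch below is a minimal Python rendering of that loop; the tag names (`<think>`, `<tool_call>`, `<response>`) and the `model_step` / `call_tool` callables are assumptions for illustration, not the paper's implementation.

```python
import re

# Hypothetical tag layout for one model turn; the exact tags used for
# training are not shown in this excerpt, so these names are assumptions.
TURN = re.compile(
    r"<think>(?P<think>.*?)</think>\s*"
    r"(?:<tool_call>(?P<tool>.*?)</tool_call>|<response>(?P<answer>.*?)</response>)",
    re.DOTALL,
)

def rollout(model_step, call_tool, prompt: str, max_turns: int = 5):
    """Schematic think -> tool call -> observation loop behind Table 4."""
    history = prompt
    for _ in range(max_turns):
        match = TURN.search(model_step(history))
        if match is None:
            break  # malformed turn; a format reward would penalize this case
        if match.group("answer") is not None:
            return match.group("answer")  # direct answer, no tool invoked
        observation = call_tool(match.group("tool"))
        history += (
            f"\n<tool_call>{match.group('tool')}</tool_call>"
            f"\n<obs>{observation}</obs>"
        )
    return None
```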
Free-form Inference Effectiveness. While our model is trained with a focus on tool call format and correctness, we further evaluate its ability to handle free-form tool use in a QA setting. Unlike the structured tool selection and application tasks, the QA setting (1) imposes no constraints on tool call parameters and (2) evaluates only the final answer, making it a "goal-oriented" rather than a "process-oriented" task. This naturally introduces a multi-step interaction scenario.

Specifically, we use Bamboogle, a multi-hop QA dataset, to assess this capability. The model is equipped with a web search tool, and we report both the answer accuracy and the number of tool calls for all baselines and our approach. As shown in Table 3, our reward design achieves the highest performance, despite this setting not being explicitly seen during training. Notably, our cold-start GRPO model surpasses the others in accuracy without relying on an excessive number of tool calls. This suggests that the model can flexibly invoke tools when needed, effectively leverage feedback, and navigate efficiently toward the correct answer.

![](images/3f289fa8a90fbf8fbf687744f30d2bb325c3c09d051f0b7147fec1d6f9461d2b.jpg)
(a) Response Length

![](images/aa1c9884a4981a22763d4b52e5c17a6c9e5dc5abe1c1948ebb70001bf6540d0a.jpg)
(b) Length Reward

Figure 8: Response length (left) and its reward (right) trends across training steps for different models.

# 5 Analysis

In this section, we conduct a series of ablation studies to identify the most effective reward design for tool calling. We explore various factors, including reward type, scale, granularity, and temporal dynamics.

# 5.1 Effect of Length Reward

We first examine the role of a length-based reward. Prior work has demonstrated that R1-like models can exhibit deeper reasoning, often reflected in longer thinking traces. To encourage this behavior, we introduce a reward term proportional to the length of the thinking field:

$$
\mathcal{R}_{\text{length}} = \min\left(\frac{L_{\text{think}}}{L_{\text{target}}}, 1\right)
$$

where $L_{\text{think}}$ denotes the length of the thinking segment in the model's output, and $L_{\text{target}}$ denotes the target output length, which we empirically set to 512. We found that the raw model rarely generates responses longer than half this length, making 512 a reasonable and effective target for encouraging longer outputs. This length-based component is added to the overall reward, which now consists of format, correctness, and reasoning-length terms.

As shown in Figure 8, both the response length and the length reward generally increase throughout training, particularly for the Qwen model series. This indicates that the length reward effectively encourages longer reasoning. However, the downstream results in Table 5 reveal that adding a length reward does not consistently improve task performance, and in smaller-scale models it can even cause substantial degradation. These observations suggest that while extended reasoning may appear desirable, it is not always beneficial for tool use tasks. In fact, excessive length may introduce unnecessary complexity, leading to overthinking and reduced effectiveness.
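For reference, $\mathcal{R}_{\text{length}}$ is straightforward to compute. The sketch below is a minimal Python rendering under the stated target of 512; measuring $L_{\text{think}}$ in whitespace-separated tokens is an assumption of this sketch, since the exact length unit is not specified in this excerpt.

```python
def length_reward(think_text: str, target_len: int = 512) -> float:
    """R_length = min(L_think / L_target, 1)."""
    # Length unit (whitespace-separated tokens) is an illustrative assumption.
    l_think = len(think_text.split())
    return min(l_think / target_len, 1.0)

# The length term is simply added to the other reward components:
# total_reward = r_format + r_correct + length_reward(think_text)
```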
| Model | Overall Acc | Non-Live AST Acc | Non-Live Exec Acc | Live Acc | Multi Turn Acc | Relevance Detection | Irrelevance Detection |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Qwen2.5-1.5B-Instruct (Original) | 46.20% | 77.96% | 76.98% | 60.73% | 2.25% | 100.00% | 56.44% |
| Qwen2.5-1.5B-Instruct (w/ Length Reward) | 33.23% | 70.58% | 71.36% | 35.63% | 0.50% | 94.44% | 4.52% |
| Qwen2.5-1.5B-Instruct (Dynamic) | 28.51% | 53.23% | 48.23% | 38.07% | 0.00% | 55.56% | 25.08% |
| Qwen2.5-3B-Instruct (Original) | 52.98% | 81.58% | 79.43% | 73.78% | 3.75% | 88.24% | 84.85% |
| Qwen2.5-3B-Instruct (w/ Length Reward) | 48.89% | 77.83% | 78.61% | 63.56% | 4.50% | 88.24% | 71.22% |
| Qwen2.5-3B-Instruct (Dynamic) | 48.24% | 77.60% | 79.11% | 63.22% | 3.00% | 88.89% | 68.53% |
| Llama-3.2-3B-Instruct (Original) | 44.10% | 74.38% | 75.18% | 56.86% | 1.37% | 94.44% | 62.23% |
| Llama-3.2-3B-Instruct (w/ Length Reward) | 44.98% | 78.02% | 77.54% | 56.55% | 1.25% | 100.00% | 63.76% |
| Llama-3.2-3B-Instruct (Dynamic) | 43.15% | 75.50% | 71.64% | 56.06% | 1.00% | 100.00% | 57.82% |
Table 5: BFCL V3 Benchmark Results (Length)

![](images/e489e46a097675008b0b2b8264d88eeba35e4558c034f26f0ab7174772e2d697.jpg)
(a) Response Length

![](images/d4f5c580a080fa660a72ece01f1f516aab789459b4ebb589abcec9b899671c97.jpg)
(b) Length Reward

Figure 9: Response length (left) and its reward (right) trends across training steps within the dynamic length reward training setting.

Dynamic Length Reward. Since the fixed-length reward showed minimal impact and converged quickly, we also explored a dynamic length reward that adapts over training steps. Specifically, we define:

$$
\mathcal{R}_{\text{dynamic}} = \min\left(\frac{L_{\text{think}}}{L_{\text{target}} \cdot (1 + p)}, 1\right)
$$

where $p = \frac{S_{\text{current}}}{S_{\text{total}}} \in [0, 1]$ denotes the normalized training progress, with $S_{\text{current}}$ and $S_{\text{total}}$ the current and total numbers of training steps. This formulation gradually increases the target thinking length over time, aligning with model maturity.

As shown in Figure 9, this approach yields steadier growth in thinking length, particularly for the Llama model. However, the performance results in Table 5 reveal that even scheduled rewards fail to improve performance. This further supports our hypothesis that extended reasoning may not benefit this task and can even have adverse effects.

Takeaway 1: While length rewards encourage longer reasoning traces, they do not consistently improve task performance and may even harm it in smaller models, highlighting that longer reasoning is not inherently better for tool use tasks.

# 5.2 Effect of Reward Scale

Next, we investigate the effect of reward scaling, specifically the relative weighting between the correctness and format rewards. Prior work in R1-style RL commonly assigns a higher weight to the correctness reward than to the format reward (Xie et al., 2025; Jin et al., 2025), emphasizing the importance of learning correct answers over superficial adherence to format. This strategy helps prevent reward hacking, where a model might exploit formatting heuristics without learning task semantics.

To test the importance of this design choice, we conduct an ablation in which we equalize the maximum correctness and format rewards by setting the former's range to $[-1, 1]$, matching that of the format reward. This adjustment only affects the final normalization step of the correctness reward:

$$
\mathcal{R}_{\text{correct}} = 2 \cdot \frac{R_{\max}}{S_{\max}} - 1 \in [-1, 1]
$$

where all variables are defined as in Section 3.3.

As shown in Table 6, this equal-scaling variant, denoted "Equal Max", results in a slight drop in overall accuracy across most models, with the exception of Qwen2.5-3B, which maintains performance comparable to the original setting. These results underscore the importance of assigning greater weight to the correctness reward: doing so helps steer the model toward mastering the core reasoning and tool use capabilities necessary for robust generalization.
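Concretely, the "Equal Max" variant only changes the final affine normalization. In the sketch below, `score` stands in for $R_{\max}$ (the raw correctness score a rollout achieves) and `score_max` for $S_{\max}$ (the maximum attainable score); these readings, and the assumption that the original setting maps the same ratio onto the $[-3, 3]$ range referenced by the two-stage schedule below, are inferred rather than taken from Section 3.3, which this excerpt does not include.

```python
def correct_reward_original(score: float, score_max: float) -> float:
    # Assumed analogue of the original normalization onto [-3, 3].
    return 6.0 * score / score_max - 3.0

def correct_reward_equal_max(score: float, score_max: float) -> float:
    # "Equal Max" ablation: 2 * (R_max / S_max) - 1, i.e. rescaled to [-1, 1].
    return 2.0 * score / score_max - 1.0
```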
| Model | Overall Acc | Non-Live AST Acc | Non-Live Exec Acc | Live Acc | Multi Turn Acc | Relevance Detection | Irrelevance Detection |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Qwen2.5-1.5B-Instruct (Original) | 46.20% | 77.96% | 76.98% | 60.73% | 2.25% | 100.00% | 56.44% |
| Qwen2.5-1.5B-Instruct (Equal Max) | 39.47% | 78.56% | 75.50% | 45.45% | 2.50% | 100.00% | 16.44% |
| Qwen2.5-1.5B-Instruct (Two Stage) | 38.85% | 77.96% | 76.23% | 44.51% | 2.25% | 100.00% | 10.61% |
| Qwen2.5-1.5B-Instruct (Dynamic) | 45.71% | 78.31% | 75.73% | 58.91% | 2.50% | 100.00% | 57.20% |
| Qwen2.5-3B-Instruct (Original) | 52.98% | 81.58% | 79.43% | 73.78% | 3.75% | 88.24% | 84.85% |
| Qwen2.5-3B-Instruct (Equal Max) | 51.76% | 81.50% | 79.50% | 69.79% | 4.25% | 88.89% | 78.07% |
| Qwen2.5-3B-Instruct (Two Stage) | 50.66% | 80.62% | 78.82% | 67.93% | 3.50% | 88.89% | 76.42% |
| Qwen2.5-3B-Instruct (Dynamic) | 53.81% | 81.44% | 80.75% | 75.43% | 3.62% | 77.78% | 88.82% |
| Llama-3.2-3B-Instruct (Original) | 44.10% | 74.38% | 75.18% | 56.86% | 1.37% | 94.44% | 62.23% |
| Llama-3.2-3B-Instruct (Equal Max) | 42.47% | 67.77% | 75.05% | 55.75% | 1.00% | 88.89% | 59.56% |
| Llama-3.2-3B-Instruct (Two Stage) | 41.33% | 65.54% | 72.70% | 55.22% | 0.75% | 88.89% | 57.59% |
| Llama-3.2-3B-Instruct (Dynamic) | 46.85% | 83.00% | 72.77% | 61.00% | 3.38% | 88.89% | 59.37% |
Table 6: BFCL V3 Benchmark Results (Scale)

Dynamic Reward Scaling. Building on the insight that the correctness reward plays a more critical role, we are further motivated by the intuition that different reward components may benefit from being emphasized at different stages of training. This leads us to explore dynamically adjusting the reward scales in accordance with training progress. Specifically, we hypothesize that in early training the model should prioritize learning the correct output format, which is an easier objective, before gradually shifting focus to the more challenging goal of tool use correctness. To test this hypothesis, we design two dynamic reward scaling strategies (a code sketch follows the discussion below):

- Two Stage (Coarse) Setting: We divide training into two phases. In the first $s$ training steps, we downscale the correctness reward to $\frac{1}{3}$ of its original scale while keeping the format reward at its original scale. After step $s$, we restore the correctness reward to its original scale and simultaneously reduce the format reward to the range $[0, 0.5]$ ($\frac{1}{2}$ of its original scale). Formally, the reward scales are:

$$
\mathrm{Scale}_{\text{format}} = \begin{cases} [0, 1] & \text{if } S_{\text{current}} < s \\ [0, 0.5] & \text{otherwise} \end{cases},
\qquad
\mathrm{Scale}_{\text{correct}} = \begin{cases} [-1, 1] & \text{if } S_{\text{current}} < s \\ [-3, 3] & \text{otherwise} \end{cases}
$$

where $S_{\text{current}}$ denotes the current training step. In our experiments, we empirically set the switching point to $s = 30$ steps, as we observed that the format reward typically experiences a significant increase within the first 30 steps; it is therefore more beneficial for later steps to shift focus toward optimizing correctness.

- Dynamic (Finegrained) Setting: We apply continuous interpolation between the two reward scales throughout training. Initially, both the format and correctness reward scales are set equally. Over time, the format reward scale linearly decays to its original value, while the correctness reward scale gradually increases to its original value, allowing training to shift focus from format adherence to task correctness. Formally, the dynamic scaling is defined as:

$$
\mathrm{Scale}_{\text{format}} = [-2 + p,\; 2 - p], \qquad \mathrm{Scale}_{\text{correct}} = [-2 - p,\; 2 + p]
$$

where $p \in [0, 1]$ again represents the normalized training progress. This design ensures a smooth shift of the learning focus from format fidelity to correctness.

![](images/c4a42ce99210f1c629662d6104f163b5fa2dbc2a5dfddaa2923d8c1986ba915a.jpg)
(a) Format Reward

![](images/07889187074a1d5140bc409ad1bd803fcc49ff172e176507ffd4fb8fa1a0b325.jpg)
(b) Correctness Reward

Figure 10: Format (left) and correctness (right) reward trends across training steps for Qwen2.5-3B-Instruct with different reward scale dynamics.

We present the reward dynamics of the original and the two dynamic scaling strategies in Figure 10. As shown in Table 6, the Two Stage (Coarse) reward setting unexpectedly leads to a drop in performance, whereas the Dynamic (Finegrained) scaling improves the model's benchmark performance. These findings suggest that abrupt shifts in reward scale may negatively impact the training dynamics. In contrast, a smoother, gradual transition from simpler objectives to more nuanced ones appears to better support the model's learning trajectory and generalization during GRPO training.
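To make the two schedules concrete, the sketch below transcribes them as functions returning a (format, correctness) pair of reward ranges. How a range is applied to a raw reward (an affine map from the normalized score onto the interval) follows Section 3.3 and is assumed here rather than reproduced.

```python
def two_stage_scales(step: int, s: int = 30):
    """Coarse schedule: format-focused before step s, correctness-focused after."""
    if step < s:
        return (0.0, 1.0), (-1.0, 1.0)   # (Scale_format, Scale_correct)
    return (0.0, 0.5), (-3.0, 3.0)

def dynamic_scales(step: int, total_steps: int):
    """Finegrained schedule: linear interpolation driven by progress p in [0, 1]."""
    p = step / total_steps
    return (-2.0 + p, 2.0 - p), (-2.0 - p, 2.0 + p)
```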
Takeaway 2: Gradually adjusting reward scales during training, rather than changing them abruptly, better supports model learning and generalization, highlighting the benefits of a smoother transition from simpler objectives to more complex ones.

# 5.3 Effect of Reward Granularity

We now perform a detailed analysis of the effect of reward granularity, focusing specifically on the correctness reward. Tool calling, by nature, poses challenges for reward assignment, as it involves multiple facets beyond a single definitive answer (in contrast to, e.g., math reasoning tasks). Our original reward design decomposes correctness into matching the tool name, the parameter names, and the parameter values, offering a finegrained, "process-oriented" signal that reflects partial correctness in tool usage.

To assess the impact of this granularity, we evaluate three alternative reward formulations with progressively coarser levels of aggregation (a code sketch appears at the end of this subsection):

- Finegrained: We apply strict exact-match constraints to both the tool name and the parameter name matching. Specifically, we define:

$$
r_{\text{name}} = \mathbb{1}\left[N_G = N_P\right] \in \{0, 1\}
$$

$$
r_{\text{param}} = \sum_{G_j \in G} \mathbb{1}\left[\operatorname{keys}(P_G) = \operatorname{keys}(P_P)\right] \in [0, |G|]
$$

- Intermediate: We combine the parameter name and value rewards into a single term that enforces an exact match on the entire parameter dictionary. Formally:

$$
r_{\text{param}} + r_{\text{value}} = \sum_{G_j \in G} \mathbb{1}\left[P_G = P_P\right] \in [0, |G|]
$$

- Coarse: At the coarsest level, we fully entangle the tool name, parameter names, and parameter values, treating the entire tool set as a unit. Reward is given only if the generated tool set exactly matches the ground truth:

$$
r_{\text{name}} + r_{\text{param}} + r_{\text{value}} = \mathbb{1}[G = P] \in \{0, 1\}
$$

All other aspects of the reward computation are kept identical to those described in Section 3.3. Starting from our original design, which is the most finegrained, we progressively entangle reward components to derive increasingly coarse-grained alternatives.

![](images/552649707e12fd059580188a20cb855e21ec6e4f969add3bd77cecc592a62b4e.jpg)
Figure 11: Correctness reward trends across training steps for Qwen2.5-3B-Instruct with different reward granularity.

The reward dynamics across training steps, shown in Figure 11, demonstrate that as the reward granularity becomes coarser, it becomes harder for the model to achieve high reward values during RL training. This suggests that overly strict, entangled rewards may lead to sparse learning signals, potentially hindering effective credit assignment.

The empirical results in Table 7 further support this insight: our original, most finegrained reward strategy performs well across models. In general, finer-grained reward decomposition leads to better training outcomes and higher final task performance, indicating its advantage in promoting more stable and effective policy learning.
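The sketch below renders the three coarser formulations over tool calls represented as name/parameter dictionaries. Matching each predicted call to a gold call by tool name is a simplification assumed for illustration; the paper's exact matching procedure is defined in Section 3.3, which this excerpt does not include.

```python
# Each tool call is represented here as {"name": str, "parameters": dict}.

def _align(gold: list, pred: list):
    # Simplifying assumption: align predicted to gold calls by tool name.
    by_name = {c["name"]: c["parameters"] for c in pred}
    return [(c["parameters"], by_name.get(c["name"])) for c in gold]

def r_name(gold: list, pred: list) -> float:
    # 1[N_G = N_P]: exact match between gold and predicted tool-name sets.
    return float({c["name"] for c in gold} == {c["name"] for c in pred})

def r_param_finegrained(gold: list, pred: list) -> float:
    # One unit per gold call whose parameter-name set is reproduced exactly.
    return sum(float(p is not None and set(g) == set(p)) for g, p in _align(gold, pred))

def r_param_value_intermediate(gold: list, pred: list) -> float:
    # Names and values entangled: exact match on the whole parameter dict.
    return sum(float(g == p) for g, p in _align(gold, pred))

def r_coarse(gold: list, pred: list) -> float:
    # Fully entangled: reward only for an exact match of the entire call set.
    return float(gold == pred)
```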
Takeaway 3: Finegrained reward decomposition provides richer learning signals, highlighting its role in enabling more effective training compared to coarse reward formulations, which can impede progress and degrade final performance.

# 6 Conclusion

In this paper, we present a reward design tailored for GRPO training on tool use tasks. Empirically, our model trained from scratch using GRPO consistently outperforms both SFT-based and SFT-initialized RL baselines, as well as models trained with alternative RL algorithms, across a variety of held-out tool use benchmarks. Furthermore, we demonstrate that our model generalizes well to QA settings, exhibiting robust multi-turn interactions, emergent proactiveness, and metacognitive behaviors, all of which are key traits for efficient and adaptable tool use, lying at the core of foundational agent capabilities. Our in-depth analysis of reward types, scaling strategies, granularity, and temporal dynamics provides further insights into how reward shaping influences learning and behavior. We hope these findings serve as a roadmap for future work in applying reinforcement learning to tool use. Ultimately, we envision that reward is all tool learning needs, and that RL offers a powerful path toward generalizable and creative agent behavior.
| Model | Overall Acc | Non-Live AST Acc | Non-Live Exec Acc | Live Acc | Multi Turn Acc | Relevance Detection | Irrelevance Detection |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Qwen2.5-1.5B-Instruct (Original) | 46.20% | 77.96% | 76.98% | 60.73% | 2.25% | 100.00% | 56.44% |
| Qwen2.5-1.5B-Instruct (Finegrained) | 40.71% | 78.00% | 75.55% | 48.91% | 2.00% | 100.00% | 24.84% |
| Qwen2.5-1.5B-Instruct (Intermediate) | 37.65% | 77.94% | 72.46% | 43.00% | 1.62% | 100.00% | 12.45% |
| Qwen2.5-1.5B-Instruct (Coarse) | 36.72% | 76.44% | 70.86% | 41.27% | 2.12% | 100.00% | 12.24% |
| Qwen2.5-3B-Instruct (Original) | 52.98% | 81.58% | 79.43% | 73.78% | 3.75% | 88.24% | 84.85% |
| Qwen2.5-3B-Instruct (Finegrained) | 52.06% | 81.65% | 79.64% | 69.21% | 5.50% | 83.33% | 78.14% |
| Qwen2.5-3B-Instruct (Intermediate) | 51.36% | 81.15% | 80.07% | 68.64% | 4.25% | 88.89% | 75.74% |
| Qwen2.5-3B-Instruct (Coarse) | 51.40% | 79.48% | 78.54% | 68.73% | 5.62% | 88.89% | 77.80% |
| Llama-3.2-3B-Instruct (Original) | 44.10% | 74.38% | 75.18% | 56.86% | 1.37% | 94.44% | 62.23% |
| Llama-3.2-3B-Instruct (Finegrained) | 39.82% | 64.71% | 70.68% | 52.20% | 0.25% | 100.00% | 56.68% |
| Llama-3.2-3B-Instruct (Intermediate) | 38.62% | 59.83% | 71.86% | 50.56% | 0.25% | 94.44% | 55.68% |
| Llama-3.2-3B-Instruct (Coarse) | 35.95% | 52.00% | 61.43% | 48.96% | 1.12% | 83.33% | 61.92% |
Table 7: BFCL V3 Benchmark Results (Granularity)

# References

Emre Can Acikgoz, Jeremiah Greer, Akul Datta, Ze Yang, William Zeng, Oussama Elachqar, Emmanuel Koukoumidis, Dilek Hakkani-Tur, and Gokhan Tur. 2025. Can a single model master both multi-turn conversations and tool use? CoALM: A unified conversational agentic language model. Preprint, arXiv:2502.08820.

Jinheon Baek, Sujay Kumar Jauhar, Silviu Cucerzan, and Sung Ju Hwang. 2024. ResearchAgent: Iterative research idea generation over scientific literature with large language models. arXiv preprint arXiv:2404.07738.

Baian Chen, Chang Shu, Ehsan Shareghi, Nigel Collier, Karthik Narasimhan, and Shunyu Yao. 2023a. FireAct: Toward language agent fine-tuning. arXiv preprint arXiv:2310.05915.

Nuo Chen, Hongguang Li, Baoyuan Wang, and Jia Li. 2023b. From good to great: Improving math reasoning with tool-augmented interleaf prompting. arXiv preprint arXiv:2401.05384.

Wenhu Chen, Xueguang Ma, Xinyi Wang, and William W Cohen. 2022. Program of thoughts prompting: Disentangling computation from reasoning for numerical reasoning tasks. arXiv preprint arXiv:2211.12588.

Zehui Chen, Kuikun Liu, Qiuchen Wang, Wenwei Zhang, Jiangning Liu, Dahua Lin, Kai Chen, and Feng Zhao. 2024. Agent-FLAN: Designing data and methods of effective agent tuning for large language models. In Findings of the Association for Computational Linguistics: ACL 2024, pages 9354–9366, Bangkok, Thailand. Association for Computational Linguistics.

Tianzhe Chu, Yuexiang Zhai, Jihan Yang, Shengbang Tong, Saining Xie, Dale Schuurmans, Quoc V Le, Sergey Levine, and Yi Ma. 2025. SFT memorizes, RL generalizes: A comparative study of foundation model post-training. arXiv preprint arXiv:2501.17161.

Quy-Anh Dang and Chris Ngo. 2025. Reinforcement learning for reasoning in small LLMs: What works and what doesn't. arXiv preprint arXiv:2503.16219.

Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. 2024. The Llama 3 herd of models. arXiv preprint arXiv:2407.21783.

Zhibin Gou, Zhihong Shao, Yeyun Gong, Yelong Shen, Yujiu Yang, Minlie Huang, Nan Duan, and Weizhu Chen. 2023. ToRA: A tool-integrated reasoning agent for mathematical problem solving. arXiv preprint arXiv:2309.17452.

Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. DeepSeek-R1: Incentivizing reasoning capability in LLMs via reinforcement learning. arXiv preprint arXiv:2501.12948.

Jiangyong Huang, Silong Yong, Xiaojian Ma, Xiongkun Linghu, Puhao Li, Yan Wang, Qing Li, Song-Chun Zhu, Baoxiong Jia, and Siyuan Huang. 2023. An embodied generalist agent in 3D world. arXiv preprint arXiv:2311.12871.

Zhen Huang, Haoyang Zou, Xuefeng Li, Yixiu Liu, Yuxiang Zheng, Ethan Chern, Shijie Xia, Yiwei Qin, Weizhe Yuan, and Pengfei Liu. 2024. O1 replication journey - part 2: Surpassing o1-preview through simple distillation, big progress or bitter lesson? arXiv preprint arXiv:2411.16489.

Yoshitaka Inoue, Tianci Song, and Tianfan Fu. 2024. DrugAgent: Explainable drug repurposing agent with large language model-based reasoning. arXiv preprint arXiv:2408.13378.
Bowen Jin, Hansi Zeng, Zhenrui Yue, Dong Wang, Hamed Zamani, and Jiawei Han. 2025. Search-R1: Training LLMs to reason and leverage search engines with reinforcement learning. arXiv preprint arXiv:2503.09516.

Minki Kang, Jongwon Jeong, and Jaewoong Cho. 2025. T1: Tool-integrated self-verification for test-time compute scaling in small language models. arXiv preprint arXiv:2504.04718.

Timo Kaufmann, Paul Weng, Viktor Bengs, and Eyke Hüllermeier. 2023. A survey of reinforcement learning from human feedback. arXiv preprint arXiv:2312.14925.

Komal Kumar, Tajamul Ashraf, Omkar Thawakar, Rao Muhammad Anwer, Hisham Cholakkal, Mubarak Shah, Ming-Hsuan Yang, Phillip HS Torr, Salman Khan, and Fahad Shahbaz Khan. 2025. LLM post-training: A deep dive into reasoning large language models. arXiv preprint arXiv:2502.21321.

Minghao Li, Yingxiu Zhao, Bowen Yu, Feifan Song, Hangyu Li, Haiyang Yu, Zhoujun Li, Fei Huang, and Yongbin Li. 2023. API-Bank: A comprehensive benchmark for tool-augmented LLMs. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 3102–3116.

Xuefeng Li, Haoyang Zou, and Pengfei Liu. 2025a. LIMR: Less is more for RL scaling. arXiv preprint arXiv:2502.11886.

Xuefeng Li, Haoyang Zou, and Pengfei Liu. 2025b. ToRL: Scaling tool-integrated RL. arXiv preprint arXiv:2503.23383.

Minpeng Liao, Wei Luo, Chengxi Li, Jing Wu, and Kai Fan. 2024. MARIO: Math reasoning with code interpreter output - a reproducible pipeline. arXiv preprint arXiv:2401.08190.

Qiqiang Lin, Muning Wen, Qiuying Peng, Guanyu Nie, Junwei Liao, Jun Wang, Xiaoyun Mo, Jiamu Zhou, Cheng Cheng, Yin Zhao, et al. 2024. Hammer: Robust function-calling for on-device language models via function masking. arXiv preprint arXiv:2410.04587.

Chen Ling, Xujiang Zhao, Jiaying Lu, Chengyuan Deng, Can Zheng, Junxiang Wang, Tanmoy Chowdhury, Yun Li, Hejie Cui, Xuchao Zhang, et al. 2023. Domain specialization as the key to make large language models disruptive: A comprehensive survey. arXiv preprint arXiv:2305.18703.

Weiwen Liu, Xu Huang, Xingshan Zeng, Xinlong Hao, Shuai Yu, Dexun Li, Shuai Wang, Weinan Gan, Zhengying Liu, Yuanqing Yu, et al. 2024. ToolACE: Winning the points of LLM function calling. arXiv preprint arXiv:2409.00920.

Yu Meng, Mengzhou Xia, and Danqi Chen. 2024. SimPO: Simple preference optimization with a reference-free reward. Advances in Neural Information Processing Systems, 37:124198–124235.

Shishir G Patil, Tianjun Zhang, Xin Wang, and Joseph E Gonzalez. 2023. Gorilla: Large language model connected with massive APIs. arXiv preprint arXiv:2305.15334.

Shishir G Patil, Tianjun Zhang, Xin Wang, and Joseph E Gonzalez. 2024. Gorilla: Large language model connected with massive APIs. Advances in Neural Information Processing Systems, 37:126544–126565.

Ofir Press, Muru Zhang, Sewon Min, Ludwig Schmidt, Noah A Smith, and Mike Lewis. 2022. Measuring and narrowing the compositionality gap in language models. arXiv preprint arXiv:2210.03350.

Cheng Qian, Emre Can Acikgoz, Hongru Wang, Xiusi Chen, Avirup Sil, Dilek Hakkani-Tur, Gokhan Tur, and Heng Ji. 2025. SMART: Self-aware agent for tool overuse mitigation. arXiv preprint arXiv:2502.11435.

Cheng Qian, Chi Han, Yi Fung, Yujia Qin, Zhiyuan Liu, and Heng Ji. 2023. CREATOR: Tool creation for disentangling abstract and concrete reasoning of large language models. In Findings of the Association for Computational Linguistics: EMNLP 2023, pages 6922–6939.
Cheng Qian, Peixuan Han, Qinyu Luo, Bingxiang He, Xiusi Chen, Yuji Zhang, Hongyi Du, Jiarui Yao, Xiaocheng Yang, Denghui Zhang, et al. 2024a. EscapeBench: Pushing language models to think outside the box. arXiv preprint arXiv:2412.13549.

Cheng Qian, Chenyan Xiong, Zhenghao Liu, and Zhiyuan Liu. 2024b. Toolink: Linking toolkit creation and using through chain-of-solving on open-source model. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 831–854.

Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, et al. 2024a. O1 replication journey: A strategic progress report - part 1. arXiv preprint arXiv:2410.18982.

Yujia Qin, Shengding Hu, Yankai Lin, Weize Chen, Ning Ding, Ganqu Cui, Zheni Zeng, Yufei Huang, Chaojun Xiao, Chi Han, et al. 2023. Tool learning with foundation models. arXiv preprint arXiv:2304.08354.

Yujia Qin, Shengding Hu, Yankai Lin, Weize Chen, Ning Ding, Ganqu Cui, Zheni Zeng, Xuanhe Zhou, Yufei Huang, Chaojun Xiao, et al. 2024b. Tool learning with foundation models. ACM Computing Surveys, 57(4):1–40.

Yujia Qin, Shihao Liang, Yining Ye, Kunlun Zhu, Lan Yan, Yaxi Lu, Yankai Lin, Xin Cong, Xiangru Tang, Bill Qian, Sihan Zhao, Lauren Hong, Runchu Tian, Ruobing Xie, Jie Zhou, Mark Gerstein, Dahai Li, Zhiyuan Liu, and Maosong Sun. 2024c. ToolLLM: Facilitating large language models to master 16000+ real-world APIs. In The Twelfth International Conference on Learning Representations.

Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. 2023. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728–53741.

Yusuf Roohani, Andrew Lee, Qian Huang, Jian Vora, Zachary Steinhart, Kexin Huang, Alexander Marson, Percy Liang, and Jure Leskovec. 2024. BioDiscoveryAgent: An AI agent for designing genetic perturbation experiments. arXiv preprint arXiv:2405.17631.

Timo Schick, Jane Dwivedi-Yu, Roberto Dessì, Roberta Raileanu, Maria Lomeli, Eric Hambro, Luke Zettlemoyer, Nicola Cancedda, and Thomas Scialom. 2023. Toolformer: Language models can teach themselves to use tools. Advances in Neural Information Processing Systems, 36:68539–68551.

John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. 2017. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347.

Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. 2024. DeepSeekMath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300.

Haozhan Shen, Peng Liu, Jingcheng Li, Chunxin Fang, Yibo Ma, Jiajia Liao, Qiaoli Shen, Zilun Zhang, Kangjia Zhao, Qianqian Zhang, et al. 2025. VLM-R1: A stable and generalizable R1-style large vision-language model. arXiv preprint arXiv:2504.07615.

Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. 2024. HybridFlow: A flexible and efficient RLHF framework. arXiv preprint arXiv:2409.19256.

Huatong Song, Jinhao Jiang, Yingqian Min, Jie Chen, Zhipeng Chen, Wayne Xin Zhao, Lei Fang, and Ji-Rong Wen. 2025. R1-Searcher: Incentivizing the search capability in LLMs via reinforcement learning. arXiv preprint arXiv:2503.05592.
Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. 2025. Kimi k1.5: Scaling reinforcement learning with LLMs. arXiv preprint arXiv:2501.12599.

Qwen Team. 2024. Qwen2.5: A party of foundation models.

Tu Vu, Mohit Iyyer, Xuezhi Wang, Noah Constant, Jerry Wei, Jason Wei, Chris Tar, Yun-Hsuan Sung, Denny Zhou, Quoc Le, et al. 2023. FreshLLMs: Refreshing large language models with search engine augmentation. arXiv preprint arXiv:2310.03214.

Yidong Wang, Qi Guo, Wenjin Yao, Hongbo Zhang, Xin Zhang, Zhen Wu, Meishan Zhang, Xinyu Dai, Qingsong Wen, Wei Ye, et al. 2024. AutoSurvey: Large language models can automatically write surveys. Advances in Neural Information Processing Systems, 37:115119–115145.

Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. 2025. Logic-RL: Unleashing LLM reasoning with rule-based reinforcement learning. arXiv preprint arXiv:2502.14768.

Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik R Narasimhan, and Yuan Cao. 2023. ReAct: Synergizing reasoning and acting in language models. In The Eleventh International Conference on Learning Representations.

Yining Ye, Xin Cong, Shizuo Tian, Yujia Qin, Chong Liu, Yankai Lin, Zhiyuan Liu, and Maosong Sun. 2023. Rational decision-making agent with internalized utility judgment. arXiv preprint arXiv:2308.12519.

Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. 2025. DAPO: An open-source LLM reinforcement learning system at scale. arXiv preprint arXiv:2503.14476.

Yuanqing Yu, Zhefan Wang, Weizhi Ma, Zhicheng Guo, Jingtao Zhan, Shuai Wang, Chuhan Wu, Zhiqiang Guo, and Min Zhang. 2024. StepTool: A step-grained reinforcement learning framework for tool learning in LLMs. arXiv preprint arXiv:2410.07745.

Yufeng Yuan, Qiying Yu, Xiaochen Zuo, Ruofei Zhu, Wenyuan Xu, Jiaze Chen, Chengyi Wang, TianTian Fan, Zhengyin Du, Xiangpeng Wei, et al. 2025. VAPO: Efficient and reliable reinforcement learning for advanced reasoning tasks. arXiv preprint arXiv:2504.05118.

Aohan Zeng, Mingdao Liu, Rui Lu, Bowen Wang, Xiao Liu, Yuxiao Dong, and Jie Tang. 2024. AgentTuning: Enabling generalized agent abilities for LLMs. In Findings of the Association for Computational Linguistics: ACL 2024, pages 3053–3077, Bangkok, Thailand. Association for Computational Linguistics.

Yuanzhao Zhai, Tingkai Yang, Kele Xu, Feng Dawei, Cheng Yang, Bo Ding, and Huaimin Wang. 2024. Enhancing decision-making for LLM agents via step-level Q-value models. arXiv preprint arXiv:2409.09345.

Hongxin Zhang, Weihua Du, Jiaming Shan, Qinhong Zhou, Yilun Du, Joshua B Tenenbaum, Tianmin Shu, and Chuang Gan. 2023. Building cooperative embodied agents modularly with large language models. arXiv preprint arXiv:2307.02485.

Jianguo Zhang, Tian Lan, Ming Zhu, Zuxin Liu, Thai Hoang, Shirley Kokane, Weiran Yao, Juntao Tan, Akshara Prabhakar, Haolin Chen, et al. 2024. xLAM: A family of large action models to empower AI agent systems. arXiv preprint arXiv:2409.03215.

Yuxiang Zheng, Dayuan Fu, Xiangkun Hu, Xiaojie Cai, Lyumanshan Ye, Pengrui Lu, and Pengfei Liu. 2025. DeepResearcher: Scaling deep research via reinforcement learning in real-world environments. arXiv preprint arXiv:2504.03160.

# Appendix

# A User Prompt Details

The system instruction is shown in Figure 4.
The user prompt is used to store the trajectory history, including intermediate thoughts, tool calls, environment observations, and any additional user commands. The complete user instruction is presented in Figure 12.

# B Experiment Details

Training Data Details. We empirically use 4K data points for training, as each dataset consists of samples drawn from the same distribution. Adding more data of a similar nature does not increase task diversity. Moreover, we observe that increasing the dataset size beyond 4K does not yield noticeable improvements in training convergence or final performance, suggesting diminishing returns from additional data under this setting.

GRPO Setting Details. For all tool calls in the dataset, we use the JSON format to represent the call, as it is easy to parse and is the most general and structured way of expressing a tool call.
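For illustration, a call to the Movies_1_BuyMovieTickets tool from Table 4 might be serialized as below; the key names ("name", "parameters") and the argument values are assumptions of this sketch rather than the paper's exact schema.

```python
import json

# Hypothetical serialization of one tool call; the schema is an assumption.
tool_call = {
    "name": "Movies_1_BuyMovieTickets",
    "parameters": {
        "movie_name": "Inception",   # illustrative values only
        "number_of_tickets": 2,
        "location": "San Jose",
    },
}
encoded = json.dumps(tool_call)

# Valid JSON can be parsed back losslessly, which is what makes the
# format convenient for rule-based reward checking:
assert json.loads(encoded) == tool_call
```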
For the GRPO training, we use 2 A100 (80G) GPUs per run with the following hyper-parameters:

| Hyperparameter | Value |
| --- | --- |
| *Data Configuration* | |
| Train Batch Size | 512 |
| Validation Batch Size | 128 |
| Max Prompt Length | 2048 |
| Max Response Length | 1024 |
| *Optimization* | |
| Learning Rate | 1e-6 |
| PPO Mini Batch Size | 128 |
| KL Loss Used | False |
| *Rollout Configuration* | |
| Rollout Name | vllm |
| GPU Memory Utilization | 0.6 |
| Number of Rollouts | 4 |
| *Training & Logging* | |
| Save Frequency (Steps) | 15 |
| Test Frequency (Steps) | 5 |
| Total Epochs | 15 |

Table 8: Configuration for GRPO training.
Baselines. The 400 selected data points used for SFT share the same distribution as the 4K data points used for RL training, but differ in content. For SFT, each data point additionally includes a thought field, with content distilled from DeepSeek-R1 trajectories. In contrast, GRPO does not require ground-truth thoughts, as only the tool calls are used to compute rewards in the GRPO setting.

We use 400 data points for SFT based on the empirical observation that this amount is sufficient for the raw model to learn to follow our tool call format. This provides a stronger initialization and reduces the burden of learning the format from scratch during RL training. However, we also find that relying solely on SFT can lead to overfitting, which may ultimately degrade performance.

PPO Setting Details. We apply approximately the same parameter settings as GRPO for the PPO training.
Similarly, we use 2 A100 (80G) GPUs per run with the following hyper-parameters:

| Hyperparameter | Value |
| --- | --- |
| *Data Configuration* | |
| Train Batch Size | 512 |
| Validation Batch Size | 128 |
| Max Prompt Length | 1024 |
| Max Response Length | 512 |
| *Optimization* | |
| Actor Learning Rate | 1e-6 |
| Critic Learning Rate | 1e-5 |
| PPO Mini Batch Size | 128 |
| PPO Micro Batch Size | 8 |
| KL Coefficient | 0.001 |
| *Rollout Configuration* | |
| Rollout Name | vllm |
| GPU Memory Utilization | 0.3 |
| *Training & Logging* | |
| Critic Warmup Steps | 0 |
| Save Frequency (Steps) | 15 |
| Test Frequency (Steps) | 5 |
| Total Epochs | 15 |

Table 9: Configuration for PPO training.
# C Additional Results

We present additional results on the three benchmarks, applying the GRPO and PPO methods to models initialized with SFT on 4K data points. This setting serves as a "theoretical" upper bound, since the same 4K data is first used for SFT and subsequently reused for RL training.

The results are shown in Table 10, Table 11, and Table 12 for BFCL, API-Bank, and Bamboogle, respectively. We compare RL training initialized with models fine-tuned on either 400 or 4K SFT data points.

![](images/86c8046a4208455b9d278a90cc615af285d011f3f28c85a707ecd1bb20d82e49.jpg)
Figure 12: The user prompt used for TIR's rollout.
| Model | Overall Acc | Non-Live AST Acc | Non-Live Exec Acc | Live Acc | Multi Turn Acc | Relevance Detection | Irrelevance Detection |
| --- | --- | --- | --- | --- | --- | --- | --- |
| Qwen2.5-1.5B-Instruct (Raw) | 19.41% | 16.00% | 13.18% | 35.58% | 0.00% | 44.44% | 82.49% |
| Qwen2.5-1.5B-Instruct (SFT400+PPO) | 42.95% | 77.65% | 69.75% | 55.73% | 1.88% | 100.00% | 48.40% |
| Qwen2.5-1.5B-Instruct (SFT400+GRPO) | 40.93% | 70.54% | 60.79% | 56.33% | 1.00% | 94.44% | 58.63% |
| Qwen2.5-1.5B-Instruct (SFT4k+PPO) | 40.24% | 66.42% | 62.02% | 54.58% | 2.50% | 94.12% | 55.09% |
| Qwen2.5-1.5B-Instruct (SFT4k+GRPO) | 42.63% | 66.60% | 64.77% | 60.15% | 1.38% | 88.89% | 67.98% |
| Qwen2.5-3B-Instruct (Raw) | 33.04% | 42.52% | 40.80% | 53.96% | 1.00% | 64.71% | 56.01% |
| Qwen2.5-3B-Instruct (SFT400+PPO) | 45.80% | 78.29% | 71.09% | 58.76% | 5.12% | 94.12% | 54.70% |
| Qwen2.5-3B-Instruct (SFT400+GRPO) | 46.42% | 76.21% | 68.93% | 64.15% | 1.75% | 88.89% | 71.76% |
| Qwen2.5-3B-Instruct (SFT4k+PPO) | 48.22% | 77.75% | 73.18% | 64.27% | 5.25% | 94.12% | 66.41% |
| Qwen2.5-3B-Instruct (SFT4k+GRPO) | 47.82% | 75.12% | 69.52% | 68.19% | 2.38% | 77.78% | 76.16% |
| Qwen2.5-7B-Instruct (Raw) | 41.97% | 66.02% | 70.11% | 53.51% | 4.25% | 76.47% | 62.66% |
| Qwen2.5-7B-Instruct (SFT400+PPO) | 42.02% | 83.90% | 72.62% | 51.84% | 0.25% | 100.00% | 29.66% |
| Qwen2.5-7B-Instruct (SFT400+GRPO) | 39.25% | 80.69% | 74.34% | 46.51% | 0.25% | 100.00% | 14.19% |
| Qwen2.5-7B-Instruct (SFT4k+PPO) | 33.80% | 42.67% | 49.50% | 51.80% | 2.38% | 77.78% | 55.79% |
| Qwen2.5-7B-Instruct (SFT4k+GRPO) | 35.18% | 43.58% | 50.39% | 55.49% | 0.87% | 77.78% | 67.12% |
| Llama-3.2-3B-Instruct (Raw) | 22.09% | 17.44% | 14.57% | 43.85% | 0.00% | 77.78% | 66.07% |
| Llama-3.2-3B-Instruct (SFT400+PPO) | 41.62% | 68.10% | 69.88% | 52.98% | 3.00% | 94.12% | 56.29% |
| Llama-3.2-3B-Instruct (SFT400+GRPO) | 42.54% | 65.15% | 68.98% | 59.40% | 0.88% | 72.22% | 65.80% |
| Llama-3.2-3B-Instruct (SFT4k+PPO) | 45.41% | 73.71% | 68.46% | 62.27% | 2.50% | 82.35% | 68.75% |
| Llama-3.2-3B-Instruct (SFT4k+GRPO) | 45.50% | 70.69% | 67.70% | 64.73% | 1.00% | 77.78% | 78.85% |

Table 10: BFCL V3 Benchmark Results (Additional Result)
| Model | Overall Acc | Level 1 | Level 2 | Level 3 |
| --- | --- | --- | --- | --- |
| Qwen2.5-1.5B-Instruct (Raw) | 30.65% | 28.32% | 35.82% | 35.11% |
| Qwen2.5-1.5B-Instruct (SFT400+PPO) | 57.12% | 60.90% | 50.75% | 48.85% |
| Qwen2.5-1.5B-Instruct (SFT400+GRPO) | 61.31% | 64.16% | 58.21% | 54.20% |
| Qwen2.5-1.5B-Instruct (SFT4k+PPO) | 61.31% | 64.91% | 56.72% | 52.67% |
| Qwen2.5-1.5B-Instruct (SFT4k+GRPO) | 59.46% | 65.16% | 53.73% | 45.04% |
| Qwen2.5-3B-Instruct (Raw) | 51.59% | 59.65% | 32.84% | 36.64% |
| Qwen2.5-3B-Instruct (SFT400+PPO) | 65.16% | 67.92% | 55.22% | 61.83% |
| Qwen2.5-3B-Instruct (SFT400+GRPO) | 62.48% | 68.67% | 58.21% | 45.80% |
| Qwen2.5-3B-Instruct (SFT4k+PPO) | 60.13% | 64.41% | 44.78% | 54.96% |
| Qwen2.5-3B-Instruct (SFT4k+GRPO) | 60.80% | 64.41% | 56.72% | 51.91% |
| Qwen2.5-7B-Instruct (Raw) | 62.48% | 70.68% | 49.25% | 44.27% |
| Qwen2.5-7B-Instruct (SFT400+PPO) | 63.15% | 72.43% | 58.21% | 37.40% |
| Qwen2.5-7B-Instruct (SFT400+GRPO) | 54.10% | 61.40% | 52.24% | 32.82% |
| Qwen2.5-7B-Instruct (SFT4k+PPO) | 59.30% | 61.40% | 40.30% | 61.60% |
| Qwen2.5-7B-Instruct (SFT4k+GRPO) | 52.60% | 56.39% | 34.33% | 50.38% |
| Llama-3.2-3B-Instruct (Raw) | 40.54% | 44.86% | 29.85% | 32.82% |
| Llama-3.2-3B-Instruct (SFT400+PPO) | 57.79% | 63.16% | 47.76% | 46.56% |
| Llama-3.2-3B-Instruct (SFT400+GRPO) | 56.78% | 63.60% | 41.79% | 43.51% |
| Llama-3.2-3B-Instruct (SFT4k+PPO) | 54.10% | 60.65% | 40.30% | 41.22% |
| Llama-3.2-3B-Instruct (SFT4k+GRPO) | 50.92% | 59.15% | 34.33% | 34.35% |

Table 11: API-Bank Test Results (Additional Result)
| Model | Accuracy | Avg Num Tool Call |
| --- | --- | --- |
| Qwen2.5-1.5B-Instruct (Raw) | 20.8% | 0.61 |
| Qwen2.5-1.5B-Instruct (SFT400+PPO) | 36.8% | 1.06 |
| Qwen2.5-1.5B-Instruct (SFT400+GRPO) | 38.4% | 0.96 |
| Qwen2.5-1.5B-Instruct (SFT4k+PPO) | 36.8% | 1.06 |
| Qwen2.5-1.5B-Instruct (SFT4k+GRPO) | 34.4% | 1.02 |
| Qwen2.5-3B-Instruct (Raw) | 52.0% | 1.77 |
| Qwen2.5-3B-Instruct (SFT400+PPO) | 43.2% | 1.04 |
| Qwen2.5-3B-Instruct (SFT400+GRPO) | 56.8% | 0.99 |
| Qwen2.5-3B-Instruct (SFT4k+PPO) | 46.4% | 1.01 |
| Qwen2.5-3B-Instruct (SFT4k+GRPO) | 47.2% | 0.98 |
| Qwen2.5-7B-Instruct (Raw) | 69.6% | 1.42 |
| Qwen2.5-7B-Instruct (SFT400+PPO) | 45.6% | 3.54 |
| Qwen2.5-7B-Instruct (SFT400+GRPO) | 29.6% | 3.70 |
| Qwen2.5-7B-Instruct (SFT4k+PPO) | 40.0% | 1.25 |
| Qwen2.5-7B-Instruct (SFT4k+GRPO) | 32.0% | 1.25 |
| Llama-3.2-3B-Instruct (Raw) | 34.4% | 1.25 |
| Llama-3.2-3B-Instruct (SFT400+PPO) | 39.2% | 1.33 |
| Llama-3.2-3B-Instruct (SFT400+GRPO) | 45.6% | 1.00 |
| Llama-3.2-3B-Instruct (SFT4k+PPO) | 49.6% | 1.02 |
| Llama-3.2-3B-Instruct (SFT4k+GRPO) | 42.4% | 1.03 |
Table 12: Bamboogle Test Results (Additional Result)

Interestingly, our findings suggest that initializing from a model fine-tuned on 4K data points does not consistently outperform initialization from a model fine-tuned on only 400 data points. On the BFCL benchmark, we even observe cases where performance drops below that of the raw instruct model. This counterintuitive result may stem from overfitting during the SFT phase, which could restrict the model's ability to explore during RL training and lead to poorer generalization on held-out tasks.
b/data/2025/2504_13xxx/2504.13958/images/86c8046a4208455b9d278a90cc615af285d011f3f28c85a707ecd1bb20d82e49.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cdaea1b44c3ba7c131d0134ccd8dadce06d820c966631b42d16ca4442f0fd2ac +size 41801 diff --git a/data/2025/2504_13xxx/2504.13958/images/8878db23730494c3a7f9edc77ec5c687ed034f8797c7ff815bf34796e19a9816.jpg b/data/2025/2504_13xxx/2504.13958/images/8878db23730494c3a7f9edc77ec5c687ed034f8797c7ff815bf34796e19a9816.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3fcd43832104f248e1c1040c25b3f42552eac7f5 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/8878db23730494c3a7f9edc77ec5c687ed034f8797c7ff815bf34796e19a9816.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ce3af809b7343b20d2cdee549baa7447211a524f02129655121c16f7d47f2d4 +size 5209 diff --git a/data/2025/2504_13xxx/2504.13958/images/8e17cc4deaa8d819f6fc131f70f9940360284bd7649a786eba2c64bd6b7e3e7e.jpg b/data/2025/2504_13xxx/2504.13958/images/8e17cc4deaa8d819f6fc131f70f9940360284bd7649a786eba2c64bd6b7e3e7e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..160d6618330dbb4663640d4c16d3fc20eec9f485 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/8e17cc4deaa8d819f6fc131f70f9940360284bd7649a786eba2c64bd6b7e3e7e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d936ee2dcd376f53ece251855335265f4181533761a9c2cb49ab95faade4fc0 +size 63647 diff --git a/data/2025/2504_13xxx/2504.13958/images/93bd6dcfe55814054a392a1e93d1698968bda89f83b2cd068e4161c9e1cf658e.jpg b/data/2025/2504_13xxx/2504.13958/images/93bd6dcfe55814054a392a1e93d1698968bda89f83b2cd068e4161c9e1cf658e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7bbcb2f935b5461e34d19e8dcae78fbf873ab7c5 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/93bd6dcfe55814054a392a1e93d1698968bda89f83b2cd068e4161c9e1cf658e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:48b88bb79ec409b3f27cf0851bafb2db1bf907243e3ab7cf8dc562b401bb14a2 +size 3880 diff --git a/data/2025/2504_13xxx/2504.13958/images/940999cad06089578cb4662d7f8130594b2b9bbbb2af31ed89b67bfbc0443260.jpg b/data/2025/2504_13xxx/2504.13958/images/940999cad06089578cb4662d7f8130594b2b9bbbb2af31ed89b67bfbc0443260.jpg new file mode 100644 index 0000000000000000000000000000000000000000..608803d48089f7be93475bfd27e38f7341a2382d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/940999cad06089578cb4662d7f8130594b2b9bbbb2af31ed89b67bfbc0443260.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f901e0d25429a7e72ad740574378211fd39c5b05a30cbfa9e62e44d903ff77a3 +size 3157 diff --git a/data/2025/2504_13xxx/2504.13958/images/94ced1cd69372f6e86341575a504e8b4cc1f2bf372c3c145a6fdae3920f6dcdb.jpg b/data/2025/2504_13xxx/2504.13958/images/94ced1cd69372f6e86341575a504e8b4cc1f2bf372c3c145a6fdae3920f6dcdb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..47e297882c3a0add55bd7627edc67e01784163d8 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/94ced1cd69372f6e86341575a504e8b4cc1f2bf372c3c145a6fdae3920f6dcdb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c694c9ea8664e3dfa57db56ecf6e4cbb9ab8053fd7bcb51629e9fe479ea359dc +size 9675 diff --git a/data/2025/2504_13xxx/2504.13958/images/9da49ecd03180b3351e89de045a7633df7596f714deea3d3adb864ec3d0ad88d.jpg 
b/data/2025/2504_13xxx/2504.13958/images/9da49ecd03180b3351e89de045a7633df7596f714deea3d3adb864ec3d0ad88d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f7d798fe811834c4836f2582e9644c542df6a2f1 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/9da49ecd03180b3351e89de045a7633df7596f714deea3d3adb864ec3d0ad88d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e54b6f7216dcf970a4a010411663f5c1c605f916766c6bf4e58952cdf2aec913 +size 9234 diff --git a/data/2025/2504_13xxx/2504.13958/images/a34ee3ccb5b1cec76a45553952cfaf1d5d9d51339583c1ccec92529b9ae958ab.jpg b/data/2025/2504_13xxx/2504.13958/images/a34ee3ccb5b1cec76a45553952cfaf1d5d9d51339583c1ccec92529b9ae958ab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..626de430c994b842a45dcc926f58239ff627686d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/a34ee3ccb5b1cec76a45553952cfaf1d5d9d51339583c1ccec92529b9ae958ab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5d4521e259d8659edea180b0abc44c00a72a8ed95e6d204df14c024c2d9f74e8 +size 4978 diff --git a/data/2025/2504_13xxx/2504.13958/images/a3a33f9935b2d034f4785ca3ce0edc9bfb4b0cce014fa130937d9a32138659a6.jpg b/data/2025/2504_13xxx/2504.13958/images/a3a33f9935b2d034f4785ca3ce0edc9bfb4b0cce014fa130937d9a32138659a6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0600cbe01d8c28ef40e3606dbcf8d8d6e7d167ac --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/a3a33f9935b2d034f4785ca3ce0edc9bfb4b0cce014fa130937d9a32138659a6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aeacf99202272a97e98550e289995016d2b3986eeaa340ce3dafbe2c1152252c +size 173953 diff --git a/data/2025/2504_13xxx/2504.13958/images/aa1c9884a4981a22763d4b52e5c17a6c9e5dc5abe1c1948ebb70001bf6540d0a.jpg b/data/2025/2504_13xxx/2504.13958/images/aa1c9884a4981a22763d4b52e5c17a6c9e5dc5abe1c1948ebb70001bf6540d0a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dfc4971695e68064ac390275ac8e548624e6e4d4 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/aa1c9884a4981a22763d4b52e5c17a6c9e5dc5abe1c1948ebb70001bf6540d0a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c778aa9cd1a8634d5e7f9958aaf36ab2da5f11831e97d6beb1f47634de44015e +size 10197 diff --git a/data/2025/2504_13xxx/2504.13958/images/c0decb62c717c5b9e2ed93474a6147510ea0da8e44b962ada76b0687d177fa8e.jpg b/data/2025/2504_13xxx/2504.13958/images/c0decb62c717c5b9e2ed93474a6147510ea0da8e44b962ada76b0687d177fa8e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..96d236ae12892b36c047f2a21924f4667a10242d --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/c0decb62c717c5b9e2ed93474a6147510ea0da8e44b962ada76b0687d177fa8e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c3c40a6c2c1a458ac6bf24599af9f0603e4277e3752239a13b74be8c568db1e +size 89538 diff --git a/data/2025/2504_13xxx/2504.13958/images/c3a015e002e06dde8110a53dbf637aed44e0ebca22ae6f9b69351fadc73cf144.jpg b/data/2025/2504_13xxx/2504.13958/images/c3a015e002e06dde8110a53dbf637aed44e0ebca22ae6f9b69351fadc73cf144.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0b71dc5e542a3038ad57a14ca527194f27783b5c --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/c3a015e002e06dde8110a53dbf637aed44e0ebca22ae6f9b69351fadc73cf144.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:0c2ac75e541256e1d414e222c5d73f371553fb10c85048d104a33e6a54e6bbdc +size 17900 diff --git a/data/2025/2504_13xxx/2504.13958/images/c4a42ce99210f1c629662d6104f163b5fa2dbc2a5dfddaa2923d8c1986ba915a.jpg b/data/2025/2504_13xxx/2504.13958/images/c4a42ce99210f1c629662d6104f163b5fa2dbc2a5dfddaa2923d8c1986ba915a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e0382cb4abf958a8188459b4d3449272c15cec69 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/c4a42ce99210f1c629662d6104f163b5fa2dbc2a5dfddaa2923d8c1986ba915a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9cb5549e19a47e8823daa93bbc7466526d03d271fec5887db34808c8f8bcce6b +size 10125 diff --git a/data/2025/2504_13xxx/2504.13958/images/cd6dcd5e59416e9771d2031ef78c027d93b9d9dfba2d8b2e158e459218092ead.jpg b/data/2025/2504_13xxx/2504.13958/images/cd6dcd5e59416e9771d2031ef78c027d93b9d9dfba2d8b2e158e459218092ead.jpg new file mode 100644 index 0000000000000000000000000000000000000000..27b63bb2fd8a22236cad19721890d7ff899d75e3 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/cd6dcd5e59416e9771d2031ef78c027d93b9d9dfba2d8b2e158e459218092ead.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6ba11e163a047c5e3f91d2688233e1bd0a006c11897214ee4aecd397a89a067 +size 70867 diff --git a/data/2025/2504_13xxx/2504.13958/images/d4f5c580a080fa660a72ece01f1f516aab789459b4ebb589abcec9b899671c97.jpg b/data/2025/2504_13xxx/2504.13958/images/d4f5c580a080fa660a72ece01f1f516aab789459b4ebb589abcec9b899671c97.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f75806f540f17b9458da62ea60903cf2bc416689 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/d4f5c580a080fa660a72ece01f1f516aab789459b4ebb589abcec9b899671c97.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b4a63f5a11e1ee269e26c1a34b04fb38b2d4930b46b63a4131775d6da5ba597 +size 9986 diff --git a/data/2025/2504_13xxx/2504.13958/images/d65267e68fdd04a19a99679ce48395fe4605cf2779afc6e66e0b2988f88b2363.jpg b/data/2025/2504_13xxx/2504.13958/images/d65267e68fdd04a19a99679ce48395fe4605cf2779afc6e66e0b2988f88b2363.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0d30ebc6e72474de69a63ee50755a8ba09d0c73f --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/d65267e68fdd04a19a99679ce48395fe4605cf2779afc6e66e0b2988f88b2363.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91eea533bb17530f0390185e4c4ffe9522a3744525446468904bfabe22f8888e +size 57971 diff --git a/data/2025/2504_13xxx/2504.13958/images/d8dc3638bde2b89fa52a2572854cae623f195a7bdeaf5d9b387e540c7b3b470f.jpg b/data/2025/2504_13xxx/2504.13958/images/d8dc3638bde2b89fa52a2572854cae623f195a7bdeaf5d9b387e540c7b3b470f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6db9492044854b1a6cf8d1a8bfcf3b531c32a749 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/d8dc3638bde2b89fa52a2572854cae623f195a7bdeaf5d9b387e540c7b3b470f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:28e501ee74d6c07e79872ad0bef0fccbbeb306c7de677519555965865d05bfc4 +size 9994 diff --git a/data/2025/2504_13xxx/2504.13958/images/d9d1c4a75b58af36c4b5eb4342807cfac4846cca8d8a8fc7880fdfaabb685a3c.jpg b/data/2025/2504_13xxx/2504.13958/images/d9d1c4a75b58af36c4b5eb4342807cfac4846cca8d8a8fc7880fdfaabb685a3c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..27d61e42a177e62862cebb361604da00c8a64f79 --- /dev/null +++ 
b/data/2025/2504_13xxx/2504.13958/images/d9d1c4a75b58af36c4b5eb4342807cfac4846cca8d8a8fc7880fdfaabb685a3c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6b708c380cd923bbe4ac865f697252d912fd54c57401229784159cd970f15f3 +size 16464 diff --git a/data/2025/2504_13xxx/2504.13958/images/e10c00438e5de0d0dbd630290a68ceec36bd719e40a4b5c7b0aaef208c63b4b4.jpg b/data/2025/2504_13xxx/2504.13958/images/e10c00438e5de0d0dbd630290a68ceec36bd719e40a4b5c7b0aaef208c63b4b4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4d6acdf22f333d90906daff01797d2b50a2a6f80 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/e10c00438e5de0d0dbd630290a68ceec36bd719e40a4b5c7b0aaef208c63b4b4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d0c50834346e40edddf73b280bb3b08fee5e64e8bb44df282256023c528cfe25 +size 11671 diff --git a/data/2025/2504_13xxx/2504.13958/images/e2378971efe8c48bc5aecbb8f0825cd5d3570bcbe7aef5b317bdc0a4649d542f.jpg b/data/2025/2504_13xxx/2504.13958/images/e2378971efe8c48bc5aecbb8f0825cd5d3570bcbe7aef5b317bdc0a4649d542f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..54792f3dd007974e318e4b8f08f51bad46c957e2 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/e2378971efe8c48bc5aecbb8f0825cd5d3570bcbe7aef5b317bdc0a4649d542f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ab60d59fe7bed3e143c1dd968107495a00acd09e2726a58fe533b229d910d06 +size 72460 diff --git a/data/2025/2504_13xxx/2504.13958/images/e4449cb8b28b2188f9d5261e72497785c4a9d27afaba691623bb1707a942ed67.jpg b/data/2025/2504_13xxx/2504.13958/images/e4449cb8b28b2188f9d5261e72497785c4a9d27afaba691623bb1707a942ed67.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dc7eacf9756a25f406c26b261f0e64930762281f --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/e4449cb8b28b2188f9d5261e72497785c4a9d27afaba691623bb1707a942ed67.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30f74e3e124aa19f79d7155bc3e087e643e4512a363755995aa1e274a5d286d4 +size 4704 diff --git a/data/2025/2504_13xxx/2504.13958/images/e489e46a097675008b0b2b8264d88eeba35e4558c034f26f0ab7174772e2d697.jpg b/data/2025/2504_13xxx/2504.13958/images/e489e46a097675008b0b2b8264d88eeba35e4558c034f26f0ab7174772e2d697.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3f689637ae2bdac84374c2e429f06d2f5bc26174 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/e489e46a097675008b0b2b8264d88eeba35e4558c034f26f0ab7174772e2d697.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1895ecaa27d3652fab7880a34cd5576643afdf170f4da39b29e41f7c6663f9ac +size 10842 diff --git a/data/2025/2504_13xxx/2504.13958/images/e4d175360576c3fb81a40f514fde4894f0c62e6bb1915ba240566fc0a964c1a5.jpg b/data/2025/2504_13xxx/2504.13958/images/e4d175360576c3fb81a40f514fde4894f0c62e6bb1915ba240566fc0a964c1a5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d48c96b2b945fac6a89481097f27b21588ae196b --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/e4d175360576c3fb81a40f514fde4894f0c62e6bb1915ba240566fc0a964c1a5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46e89eca22f1806861f6a3cbe6d1cba0914306ac37ba4e36e3be865060d6654f +size 114763 diff --git a/data/2025/2504_13xxx/2504.13958/images/ed928223fecc4f4255ed002c6cfe7a99f24f35477bb84a44e7049c3547618c4e.jpg 
b/data/2025/2504_13xxx/2504.13958/images/ed928223fecc4f4255ed002c6cfe7a99f24f35477bb84a44e7049c3547618c4e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..478864a7eb37dafa8b3fd2c316857b5afab1b1f9 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/ed928223fecc4f4255ed002c6cfe7a99f24f35477bb84a44e7049c3547618c4e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20ca2a1d2effacba24c53de5645b5415ae24420464a11963139dd2d68ca08079 +size 6500 diff --git a/data/2025/2504_13xxx/2504.13958/images/edc4a0db668429fb699878570feac579cd2731eaea04e93c2eba09dd09e6b856.jpg b/data/2025/2504_13xxx/2504.13958/images/edc4a0db668429fb699878570feac579cd2731eaea04e93c2eba09dd09e6b856.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6949ab38bdd12b526f99856903553cedd01de563 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/edc4a0db668429fb699878570feac579cd2731eaea04e93c2eba09dd09e6b856.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf1fc68c3c8e0e2e8d3058c130c82513b430c30abbecf711957eb10e5429acab +size 9617 diff --git a/data/2025/2504_13xxx/2504.13958/images/ee4a6b609ba9897a35b08c96752b60d3ac344c8eea44066580b6b3d697518e76.jpg b/data/2025/2504_13xxx/2504.13958/images/ee4a6b609ba9897a35b08c96752b60d3ac344c8eea44066580b6b3d697518e76.jpg new file mode 100644 index 0000000000000000000000000000000000000000..11be1dc8daa6a13e32038285319b216db44abd7f --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/ee4a6b609ba9897a35b08c96752b60d3ac344c8eea44066580b6b3d697518e76.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e23bca728f0ab9c03c9ca6340e299610a14c4807d7a6df83c618b8cc4ea5cbc +size 3407 diff --git a/data/2025/2504_13xxx/2504.13958/images/ef560187b3bf2a0218230a0cfde3dce8ed6eed56f13f4c71a06ec6bb13c78e1a.jpg b/data/2025/2504_13xxx/2504.13958/images/ef560187b3bf2a0218230a0cfde3dce8ed6eed56f13f4c71a06ec6bb13c78e1a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe93dc334820db4f1c70c553cfb54e1a651380d4 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/ef560187b3bf2a0218230a0cfde3dce8ed6eed56f13f4c71a06ec6bb13c78e1a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ecb320079d8b47184c5e2f0949bb7cea7256bf6667726a3af564d544e48ef12 +size 8916 diff --git a/data/2025/2504_13xxx/2504.13958/images/f9ea052946be1f03ba66dd491116a8bea9bad8a566a674459368a7eceb182187.jpg b/data/2025/2504_13xxx/2504.13958/images/f9ea052946be1f03ba66dd491116a8bea9bad8a566a674459368a7eceb182187.jpg new file mode 100644 index 0000000000000000000000000000000000000000..451c4c952d17aeaf03583f6b962bb309ff57c11a --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/f9ea052946be1f03ba66dd491116a8bea9bad8a566a674459368a7eceb182187.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:617c69da95bfb23d0ed4ca3eddebe0490faa1629ea68942e5804c546df078bee +size 19532 diff --git a/data/2025/2504_13xxx/2504.13958/images/fb17f93bfe1ec9a690daa9777f7c36b8a3fb1a902a12a04c14d86bb1e48abbc9.jpg b/data/2025/2504_13xxx/2504.13958/images/fb17f93bfe1ec9a690daa9777f7c36b8a3fb1a902a12a04c14d86bb1e48abbc9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7cadb7a72a71b48d186e759659195a487059f9fd --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/fb17f93bfe1ec9a690daa9777f7c36b8a3fb1a902a12a04c14d86bb1e48abbc9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:e40b5c37ce6ce4445a409ce4956980f66428233e31d436b7895c77a9fe98100d +size 7646 diff --git a/data/2025/2504_13xxx/2504.13958/images/fd6386c610ee3b3caf1b352d1ca8d04bed553bb41efae56fabf6f31c51a1a935.jpg b/data/2025/2504_13xxx/2504.13958/images/fd6386c610ee3b3caf1b352d1ca8d04bed553bb41efae56fabf6f31c51a1a935.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c346dcb9c12fffd87ea20d64937c7936a3e12087 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/fd6386c610ee3b3caf1b352d1ca8d04bed553bb41efae56fabf6f31c51a1a935.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:814fa261a94e5a1015c16ffe33817dca2a375f6fee7c7bc69f5f76fc710e21ad +size 5157 diff --git a/data/2025/2504_13xxx/2504.13958/images/ff3197de12e4e5c6e186807989a91719f5b1f0d72bc4490312c2ad49c44e81f4.jpg b/data/2025/2504_13xxx/2504.13958/images/ff3197de12e4e5c6e186807989a91719f5b1f0d72bc4490312c2ad49c44e81f4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f0ae848c8dfd263b2594580f3ac6b40855073153 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/ff3197de12e4e5c6e186807989a91719f5b1f0d72bc4490312c2ad49c44e81f4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2751c254183363b9f115d5ac2a01f128419ef47b4e181d70d26b7c58cb4cbb6 +size 1286 diff --git a/data/2025/2504_13xxx/2504.13958/images/ff7dffe6c5eb67137c3b899eeeca3f28a0e079c2d36b14468db8f3c49c223c6c.jpg b/data/2025/2504_13xxx/2504.13958/images/ff7dffe6c5eb67137c3b899eeeca3f28a0e079c2d36b14468db8f3c49c223c6c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a023153ff926b8ba84c3c47397f438b950c127a6 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/images/ff7dffe6c5eb67137c3b899eeeca3f28a0e079c2d36b14468db8f3c49c223c6c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91c7a38b90e3b36596f9a9c810d1f98f4216574c3a7c9c3a8d2c1510a7094ad8 +size 6164 diff --git a/data/2025/2504_13xxx/2504.13958/layout.json b/data/2025/2504_13xxx/2504.13958/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..8fca3ca5e3dbed954eda7f740f55e0cc2171e852 --- /dev/null +++ b/data/2025/2504_13xxx/2504.13958/layout.json @@ -0,0 +1,14428 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 161, + 76, + 433, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 161, + 76, + 433, + 94 + ], + "spans": [ + { + "bbox": [ + 161, + 76, + 433, + 94 + ], + "type": "text", + "content": "ToolRL: Reward is All Tool Learning Needs" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 124, + 117, + 473, + 146 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 124, + 117, + 473, + 146 + ], + "spans": [ + { + "bbox": [ + 124, + 117, + 473, + 146 + ], + "type": "text", + "content": "Cheng Qian, Emre Can Acikgoz, Qi He, Hongru Wang, Xiusi Chen, Dilek Hakkani-Tür, Gokhan Tur, Heng Ji" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 197, + 147, + 396, + 159 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 197, + 147, + 396, + 159 + ], + "spans": [ + { + "bbox": [ + 197, + 147, + 396, + 159 + ], + "type": "text", + "content": "University of Illinois Urbana-Champaign" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 207, + 160, + 387, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 207, + 160, + 387, + 174 + ], + "spans": [ + { + "bbox": [ + 207, + 160, + 387, + 174 + ], + "type": "text", + "content": "{chengq9, hengji}@illinois.edu" + } + ] + } + ], + 
"index": 3 + }, + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "spans": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 84, + 244, + 274, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 244, + 274, + 626 + ], + "spans": [ + { + "bbox": [ + 84, + 244, + 274, + 626 + ], + "type": "text", + "content": "Current Large Language Models (LLMs) often undergo supervised fine-tuning (SFT) to acquire tool use capabilities. However, SFT struggles to generalize to unfamiliar or complex tool use scenarios. Recent advancements in reinforcement learning (RL), particularly with R1-like models, have demonstrated promising reasoning and generalization abilities. Yet, reward design for tool use presents unique challenges: multiple tools may be invoked with diverse parameters, and coarse-grained reward signals, such as answer matching, fail to offer the fine-grained feedback required for effective learning. In this work, we present the first comprehensive study on reward design for tool selection and application tasks within the RL paradigm. We systematically explore a wide range of reward strategies, analyzing their types, scales, granularity, and temporal dynamics. Building on these insights, we propose a principled reward design tailored for tool use tasks and apply it to train LLMs using Group Relative Policy Optimization (GRPO). Empirical evaluations across diverse benchmarks demonstrate that our approach yields robust, scalable, and stable training, achieving a " + }, + { + "bbox": [ + 84, + 244, + 274, + 626 + ], + "type": "inline_equation", + "content": "17\\%" + }, + { + "bbox": [ + 84, + 244, + 274, + 626 + ], + "type": "text", + "content": " improvement over base models and a " + }, + { + "bbox": [ + 84, + 244, + 274, + 626 + ], + "type": "inline_equation", + "content": "15\\%" + }, + { + "bbox": [ + 84, + 244, + 274, + 626 + ], + "type": "text", + "content": " gain over SFT models. These results highlight the critical role of thoughtful reward design in enhancing the tool use capabilities and generalization performance of LLMs. All the code are released to facilitate future research." + }, + { + "bbox": [ + 84, + 244, + 274, + 626 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 640, + 154, + 653 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 640, + 154, + 653 + ], + "spans": [ + { + "bbox": [ + 68, + 640, + 154, + 653 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 662, + 291, + 745 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 662, + 291, + 745 + ], + "spans": [ + { + "bbox": [ + 67, + 662, + 291, + 745 + ], + "type": "text", + "content": "Recent advances in Large Language Models (LLMs) have showcased remarkable capabilities in complex reasoning tasks (Kumar et al., 2025). 
Among the techniques that have significantly contributed to this progress, Reinforcement Learning (RL) has emerged as a powerful paradigm, enabling" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 327, + 220, + 503, + 229 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 327, + 220, + 503, + 229 + ], + "spans": [ + { + "bbox": [ + 327, + 220, + 503, + 229 + ], + "type": "text", + "content": "Task Goal: Irrelevant Tool Detection (LLM should reject inappropriate tools)" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 318, + 231, + 332, + 245 + ], + "blocks": [ + { + "bbox": [ + 318, + 231, + 332, + 245 + ], + "lines": [ + { + "bbox": [ + 318, + 231, + 332, + 245 + ], + "spans": [ + { + "bbox": [ + 318, + 231, + 332, + 245 + ], + "type": "image", + "image_path": "5f522180c398c809905b7e102f535a0a6044a6775b228ce7b2816e62b8244824.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 390, + 524, + 414 + ], + "lines": [ + { + "bbox": [ + 302, + 390, + 524, + 414 + ], + "spans": [ + { + "bbox": [ + 302, + 390, + 524, + 414 + ], + "type": "text", + "content": "Figure 1: SFT on distilled deep-thinking trajectories suffers from overthinking and limited generalization." + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "bbox": [ + 336, + 234, + 509, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 336, + 234, + 509, + 243 + ], + "spans": [ + { + "bbox": [ + 336, + 234, + 509, + 243 + ], + "type": "text", + "content": "What's the distance between San Francisco and Los Angeles in kilometers?" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 324, + 248, + 490, + 256 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 324, + 248, + 490, + 256 + ], + "spans": [ + { + "bbox": [ + 324, + 248, + 490, + 256 + ], + "type": "text", + "content": "{Name: get_date, Parameters: {location_1: str, location_2: str, unit: str}}" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 318, + 264, + 346, + 270 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 264, + 346, + 270 + ], + "spans": [ + { + "bbox": [ + 318, + 264, + 346, + 270 + ], + "type": "text", + "content": "SFT Model" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 318, + 270, + 397, + 277 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 270, + 397, + 277 + ], + "spans": [ + { + "bbox": [ + 318, + 270, + 397, + 277 + ], + "type": "text", + "content": "(Distilled from R1 Long Thought)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 318, + 279, + 433, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 279, + 433, + 316 + ], + "spans": [ + { + "bbox": [ + 318, + 279, + 433, + 316 + ], + "type": "text", + "content": " The user wants to find the distance in kilometers between San Francisco and Los Angeles. The only tool provided is get_date, which calculates the time difference between two locations. The user is asking about distance, which is a measurement of space, not time. But wait, maybe there's a" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 318, + 316, + 427, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 316, + 427, + 323 + ], + "spans": [ + { + "bbox": [ + 318, + 316, + 427, + 323 + ], + "type": "text", + "content": "space, not time. But maybe it's a misunderstanding (Over-Interpretation!), and
perhaps" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 318, + 323, + 421, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 323, + 421, + 336 + ], + "spans": [ + { + "bbox": [ + 318, + 323, + 421, + 336 + ], + "type": "text", + "content": "the tool can still be used if possible. Maybe the tool's parameters are misnamed, and it actually" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 318, + 336, + 433, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 336, + 433, + 355 + ], + "spans": [ + { + "bbox": [ + 318, + 336, + 433, + 355 + ], + "type": "text", + "content": "returns distance. Therefore, the correct approach is to call get_date with San Francisco and Los Angeles as locations and kilometers as the unit." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 318, + 357, + 418, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 357, + 418, + 364 + ], + "spans": [ + { + "bbox": [ + 318, + 357, + 418, + 364 + ], + "type": "text", + "content": " {Name: get_date, Parameters:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 318, + 364, + 418, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 364, + 418, + 370 + ], + "spans": [ + { + "bbox": [ + 318, + 364, + 418, + 370 + ], + "type": "text", + "content": "location_1: San Francisco, location_2: Los" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 318, + 370, + 409, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 370, + 409, + 378 + ], + "spans": [ + { + "bbox": [ + 318, + 370, + 409, + 378 + ], + "type": "text", + "content": "Angeles, unit: kilometers}} </tool_call>" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 442, + 264, + 467, + 270 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 442, + 264, + 467, + 270 + ], + "spans": [ + { + "bbox": [ + 442, + 264, + 467, + 270 + ], + "type": "text", + "content": "RL Model" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 442, + 270, + 501, + 277 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 442, + 270, + 501, + 277 + ], + "spans": [ + { + "bbox": [ + 442, + 270, + 501, + 277 + ], + "type": "text", + "content": "(Trained through GRPO)" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 442, + 277, + 508, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 442, + 277, + 508, + 316 + ], + "spans": [ + { + "bbox": [ + 442, + 277, + 508, + 316 + ], + "type": "text", + "content": " The user wants to find the distance between San Francisco and Los Angeles in kilometers. However, the available tool 'get_date' is not suitable for calculating" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 442, + 316, + 487, + 323 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 442, + 316, + 487, + 323 + ], + "spans": [ + { + "bbox": [ + 442, + 316, + 487, + 323 + ], + "type": "text", + "content": "distances."
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 442, + 323, + 506, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 442, + 323, + 506, + 377 + ], + "spans": [ + { + "bbox": [ + 442, + 323, + 506, + 377 + ], + "type": "text", + "content": "The tool 'get_date' is not suitable for calculating the distance between two cities. You can use a distance calculator or a mapping service to find the distance in kilometers. " + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 302, + 436, + 526, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 436, + 526, + 544 + ], + "spans": [ + { + "bbox": [ + 302, + 436, + 526, + 544 + ], + "type": "text", + "content": "LLMs to develop emergent capabilities such as self-reflection, self-correction, and long-horizon planning (Guo et al., 2025; Team et al., 2025). These capabilities have been instrumental in the success of models like o1 and R1, particularly in mathematical and logical reasoning domains (Qin et al., 2024a; Huang et al., 2024; Li et al., 2025b; Kang et al., 2025)." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 302, + 544, + 526, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 544, + 526, + 747 + ], + "spans": [ + { + "bbox": [ + 302, + 544, + 526, + 747 + ], + "type": "text", + "content": "Beyond traditional reasoning tasks, an increasingly important area is Tool-Integrated Reasoning (TIR). TIR involves LLMs interacting with external tools, such as search engines (Jin et al., 2025; Zheng et al., 2025), calculators (Chen et al., 2023b; Qin et al., 2023), or code interpreters (Gou et al., 2023; Liao et al., 2024), in a multi-step, feedback-driven loop to arrive at solutions. TIR is particularly important because it addresses core limitations of LLMs, such as outdated knowledge, calculation inaccuracy, and shallow reasoning. By integrating external tools that offer real-time access and specialized capabilities, TIR enables models to tackle complex tasks in a more grounded and goal-directed way."
+ } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 302, + 748, + 525, + 773 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 748, + 525, + 773 + ], + "spans": [ + { + "bbox": [ + 302, + 748, + 525, + 773 + ], + "type": "text", + "content": "Unlike textual reasoning, which primarily involves deduction and inference from static text," + } + ] + } + ], + "index": 29 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 13, + 258, + 36, + 608 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 13, + 258, + 36, + 608 + ], + "spans": [ + { + "bbox": [ + 13, + 258, + 36, + 608 + ], + "type": "text", + "content": "arXiv:2504.13958v1 [cs.LG] 16 Apr 2025" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 67, + 752, + 290, + 773 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 752, + 290, + 773 + ], + "spans": [ + { + "bbox": [ + 67, + 752, + 290, + 773 + ], + "type": "text", + "content": "1 Data and code released at https://github.com/qiancheng0/ToolRL" + } + ] + } + ], + "index": 31 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 69, + 69, + 291, + 135 + ], + "blocks": [ + { + "bbox": [ + 69, + 69, + 291, + 135 + ], + "lines": [ + { + "bbox": [ + 69, + 69, + 291, + 135 + ], + "spans": [ + { + "bbox": [ + 69, + 69, + 291, + 135 + ], + "type": "image", + "image_path": "d9d1c4a75b58af36c4b5eb4342807cfac4846cca8d8a8fc7880fdfaabb685a3c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 69, + 135, + 291, + 193 + ], + "blocks": [ + { + "bbox": [ + 69, + 135, + 291, + 193 + ], + "lines": [ + { + "bbox": [ + 69, + 135, + 291, + 193 + ], + "spans": [ + { + "bbox": [ + 69, + 135, + 291, + 193 + ], + "type": "image", + "image_path": "5420e28d1d217f54de626224d6cf029c9d3c8b75190b3803174eadbd95a48981.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 69, + 193, + 291, + 277 + ], + "blocks": [ + { + "bbox": [ + 69, + 193, + 291, + 277 + ], + "lines": [ + { + "bbox": [ + 69, + 193, + 291, + 277 + ], + "spans": [ + { + "bbox": [ + 69, + 193, + 291, + 277 + ], + "type": "image", + "image_path": "52af26f9a24c98cd2e0c9763ebf10c2cc361be5015d3ce094e8fa5770096f803.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 285, + 526, + 323 + ], + "lines": [ + { + "bbox": [ + 67, + 285, + 526, + 323 + ], + "spans": [ + { + "bbox": [ + 67, + 285, + 526, + 323 + ], + "type": "text", + "content": "Figure 2: Main results (left) and reward trends over training steps for GRPO Cold Start across four models (right). GRPO Cold Start, equipped with our proposed reward design, consistently achieves the highest performance, with reward curves showing a rapid increase during training."
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 297, + 79, + 525, + 163 + ], + "blocks": [ + { + "bbox": [ + 297, + 79, + 525, + 163 + ], + "lines": [ + { + "bbox": [ + 297, + 79, + 525, + 163 + ], + "spans": [ + { + "bbox": [ + 297, + 79, + 525, + 163 + ], + "type": "image", + "image_path": "f9ea052946be1f03ba66dd491116a8bea9bad8a566a674459368a7eceb182187.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 296, + 183, + 523, + 266 + ], + "blocks": [ + { + "bbox": [ + 296, + 183, + 523, + 266 + ], + "lines": [ + { + "bbox": [ + 296, + 183, + 523, + 266 + ], + "spans": [ + { + "bbox": [ + 296, + 183, + 523, + 266 + ], + "type": "image", + "image_path": "c3a015e002e06dde8110a53dbf637aed44e0ebca22ae6f9b69351fadc73cf144.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 343, + 291, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 343, + 291, + 507 + ], + "spans": [ + { + "bbox": [ + 67, + 343, + 291, + 507 + ], + "type": "text", + "content": "TIR additionally demands the model's ability to select appropriate tools, interpret intermediate outputs, and adaptively refine its trajectory on the fly. These dynamic and interactive reasoning skills position TIR at the core of the emerging paradigm of LLMs-as-agents. As such, TIR enables a wide range of applications, including scientific discovery (Roohani et al., 2024; Inoue et al., 2024), research automation (Baek et al., 2024; Wang et al., 2024), embodied task completion (Zhang et al., 2023; Huang et al., 2023), and everyday decision-making (Ye et al., 2023; Zhai et al., 2024)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 518, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 518, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 518, + 291, + 775 + ], + "type": "text", + "content": "Training LLMs for TIR tasks has predominantly relied on Supervised Fine-Tuning (SFT), wherein existing approaches typically generate these integrated reasoning steps offline, followed by subsequent SFT on these trajectories (Chen et al., 2023a; Zeng et al., 2024; Chen et al., 2024; Acikgoz et al., 2025). While SFT is effective to some extent, it struggles with generalization, exploration, and adaptability (Chu et al., 2025; Guo et al., 2025). As illustrated in Figure 1, a model trained with SFT on deep-thinking trajectories over-interprets the tool and fails to reject the inappropriate tool, merely imitating cues like \"but wait\" without engaging in genuine deep thinking. As such, SFT often fails to capture the strategic flexibility needed for optimal tool use, particularly in open-ended or multi-step settings. This motivates a fundamental research question: Can RL-based training methods better equip LLMs with agentic tool-using capabilities," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 344, + 520, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 344, + 520, + 358 + ], + "spans": [ + { + "bbox": [ + 302, + 344, + 520, + 358 + ], + "type": "text", + "content": "and if so, what is the optimal RL design for TIR?" 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 361, + 526, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 361, + 526, + 483 + ], + "spans": [ + { + "bbox": [ + 302, + 361, + 526, + 483 + ], + "type": "text", + "content": "Recent efforts such as Search-R1 (Jin et al., 2025) and TORL (Li et al., 2025b) have begun to explore this direction. However, their focus is narrow: either constrained to search tools in question-answering settings or code tools in math problem-solving. In contrast, our work aims to study RL-based training for general-purpose tool selection and application, across diverse and complex tool sets with different task types." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 486, + 526, + 758 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 486, + 526, + 758 + ], + "spans": [ + { + "bbox": [ + 302, + 486, + 526, + 758 + ], + "type": "text", + "content": "For an RL algorithm to be effective, a well-designed reward is essential. Unlike math tasks with a single correct answer, Tool-Integrated Reasoning (TIR) tasks introduce multiple layers of complexity: they often involve multi-step interactions where each turn may require invoking multiple tools, each with carefully specified parameters. Designing effective reward signals to guide learning through this complexity remains an open and underexplored challenge. In this paper, we focus on the problem of reward design for TIR and propose a principled, generalizable framework that can be applied across various RL algorithms. While our reward design is algorithm-agnostic by nature, we empirically demonstrate its effectiveness using both Group Relative Policy Optimization (GRPO) (Shao et al., 2024) and Proximal Policy Optimization (PPO) (Schulman et al., 2017), showcasing its versatility and impact on improving tool use performance." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 314, + 761, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 761, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 314, + 761, + 526, + 775 + ], + "type": "text", + "content": "We begin by formalizing the TIR task, and out" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 291, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 291, + 220 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 291, + 220 + ], + "type": "text", + "content": "lining general principles for effective reward design. Building on this foundation, we show how RL algorithms can be leveraged to train LLMs for robust and context-aware tool selection and application. Empirical results demonstrate that our approach outperforms base models by " + }, + { + "bbox": [ + 67, + 71, + 291, + 220 + ], + "type": "inline_equation", + "content": "17\\%" + }, + { + "bbox": [ + 67, + 71, + 291, + 220 + ], + "type": "text", + "content": " and SFT models by " + }, + { + "bbox": [ + 67, + 71, + 291, + 220 + ], + "type": "inline_equation", + "content": "15\\%" + }, + { + "bbox": [ + 67, + 71, + 291, + 220 + ], + "type": "text", + "content": " across multiple tool use and QA benchmarks. Moreover, the trained model exhibits strong generalization to unseen scenarios and task objectives, along with emergent behaviors such as proactiveness and metacognitive reasoning."
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 221, + 291, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 221, + 291, + 381 + ], + "spans": [ + { + "bbox": [ + 69, + 221, + 291, + 381 + ], + "type": "text", + "content": "To identify optimal reward strategies, we next systematically explore a broad spectrum of reward configurations across four key dimensions: (1) reward type (what aspect to reward), (2) reward scale (how much to reward), (3) reward granularity (how detailed the reward signal is), and (4) reward dynamics (how rewards evolve over time). Through extensive experiments, we identify reward designs that best align with agentic tool use and uncover insights into what makes a reward \"useful\" for tool-invoking LLMs. We summarize the core insights we derive as follows:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 385, + 289, + 471 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 69, + 385, + 289, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 385, + 289, + 411 + ], + "spans": [ + { + "bbox": [ + 69, + 385, + 289, + 411 + ], + "type": "text", + "content": "- A longer reasoning trace is not inherently better, and length rewards can degrade performance." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 415, + 288, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 415, + 288, + 442 + ], + "spans": [ + { + "bbox": [ + 69, + 415, + 288, + 442 + ], + "type": "text", + "content": "- A dynamic reward scale helps models transition smoothly from simple to complex behaviors." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 445, + 288, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 445, + 288, + 471 + ], + "spans": [ + { + "bbox": [ + 69, + 445, + 288, + 471 + ], + "type": "text", + "content": "- Fine-grained reward decomposition leads to more stable and effective learning." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 475, + 290, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 475, + 290, + 502 + ], + "spans": [ + { + "bbox": [ + 67, + 475, + 290, + 502 + ], + "type": "text", + "content": "We also summarize the overall contributions of our paper as follows:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 504, + 290, + 645 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 69, + 504, + 289, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 504, + 289, + 544 + ], + "spans": [ + { + "bbox": [ + 69, + 504, + 289, + 544 + ], + "type": "text", + "content": "- We present the first systematic study on RL-based training for general-purpose tool selection and application in LLMs." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 548, + 290, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 548, + 290, + 587 + ], + "spans": [ + { + "bbox": [ + 69, + 548, + 290, + 587 + ], + "type": "text", + "content": "- We propose a principled reward design framework tailored for TIR and validate its effectiveness through RL algorithms including GRPO."
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 591, + 290, + 645 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 591, + 290, + 645 + ], + "spans": [ + { + "bbox": [ + 69, + 591, + 290, + 645 + ], + "type": "text", + "content": "- We conduct extensive experiments analyzing the effects of various reward strategies and distill actionable insights for future research on LLM-agent training." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 647, + 290, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 647, + 290, + 702 + ], + "spans": [ + { + "bbox": [ + 67, + 647, + 290, + 702 + ], + "type": "text", + "content": "This work pioneers the application of RL to general TIR and provides the first empirical roadmap for reward design in TIR, paving the way toward more capable and autonomous LLM agents." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 712, + 161, + 725 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 712, + 161, + 725 + ], + "spans": [ + { + "bbox": [ + 67, + 712, + 161, + 725 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 734, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 734, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 734, + 291, + 775 + ], + "type": "text", + "content": "Tool-Integrated Reasoning of LLMs. Tool-integrated reasoning (TIR) has emerged as a promising approach to enhance the capabilities of" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 71, + 527, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 527, + 423 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 527, + 423 + ], + "type": "text", + "content": "LLMs. Early studies introduced the concept of equipping LLMs with external tools to overcome their inherent limitations (Schick et al., 2023; Qin et al., 2024b; Yao et al., 2023), such as program executors (Chen et al., 2022) and search engines (Vu et al., 2023). To systematically assess these enhanced capabilities, several benchmarks have been proposed to evaluate tool use performance across various dimensions, including API selection, argument generation, and generalization (Qin et al., 2024c; Patil et al., 2023; Qian et al., 2024a). Building on this foundation, subsequent research has focused on constructing high-quality tool use datasets (Liu et al., 2024; Qian et al., 2025), enabling models to autonomously create and invoke tools (Qian et al., 2023, 2024b), and applying these techniques to problems spanning different modalities (Shen et al., 2025) and specialized domains (Ling et al., 2023). More recently, reinforcement learning (RL) has been explored as an effective framework to further improve TIR, demonstrating success in tasks such as information retrieval (Jin et al., 2025) and math computation (Li et al., 2025b). These advances collectively highlight the growing potential of tool-augmented LLMs for general-purpose reasoning in open-domain settings." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 435, + 527, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 435, + 527, + 706 + ], + "spans": [ + { + "bbox": [ + 302, + 435, + 527, + 706 + ], + "type": "text", + "content": "Exploration of RL in LLMs. 
Previous work has primarily relied on supervised fine-tuning (SFT) with carefully curated datasets to enhance LLM performance in tool use (Schick et al., 2023; Qin et al., 2024c). Recently, reinforcement learning (RL) has gained traction as a more scalable and generalizable training paradigm. The development of RL methods for LLMs has evolved from reinforcement learning from human feedback (RLHF) (Kaufmann et al., 2023) and proximal policy optimization (PPO) (Schulman et al., 2017) to more advanced techniques such as direct preference optimization (DPO) (Rafailov et al., 2023), SimPO (Meng et al., 2024), and group relative policy optimization (GRPO) (Shao et al., 2024). Extensions like dynamic sampling policy optimization (DAPO) (Yu et al., 2025) and the more recent value-based augmented proximal policy optimization (VAPO) (Yuan et al., 2025) further improve training stability and efficiency." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 708, + 527, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 708, + 527, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 708, + 527, + 775 + ], + "type": "text", + "content": "Among these, GRPO (Shao et al., 2024) is specifically designed for LLMs, replacing the traditional critic with a group-based evaluation strategy. It has shown strong performance in enhancing reasoning abilities across a range of tasks, including math-" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 291, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 291, + 260 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 291, + 260 + ], + "type": "text", + "content": "ematical problem solving (Shao et al., 2024; Xie et al., 2025), search engine interaction (Jin et al., 2025; Song et al., 2025), and code generation (Li et al., 2025b). Beyond task variety, recent studies have analyzed the influence of dataset scale (Li et al., 2025a) and GRPO's effectiveness in smaller model settings (Dang and Ngo, 2025). GRPO's flexible reward function enables adaptation to diverse objectives, such as assigning weights to subtasks (Yu et al., 2024) or constraining tool use frequency (Li et al., 2025b). In this work, we extend GRPO to enhance general tool use capabilities, improving LLMs' ability to select and interact with external tools across a wide range of scenarios." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 270, + 130, + 283 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 270, + 130, + 283 + ], + "spans": [ + { + "bbox": [ + 67, + 270, + 130, + 283 + ], + "type": "text", + "content": "3 Method" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 292, + 291, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 292, + 291, + 468 + ], + "spans": [ + { + "bbox": [ + 67, + 292, + 291, + 468 + ], + "type": "text", + "content": "Supervised fine-tuning (SFT), as illustrated in Figure 1, often suffers from overfitting to certain patterns and constrains the model's ability to learn optimal strategies for tool use. To address this, we introduce a reinforcement learning (RL) approach for enhancing tool-integrated reasoning (TIR) in LLMs. In this section, we begin by defining the TIR task (Section 3.1), followed by our customized rollout strategy (Section 3.2) and reward design (Section 3.3). 
These components are then integrated into the Group Relative Policy Optimization (GRPO) framework (Shao et al., 2024) to guide model training on general TIR tasks (Section 3.4)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 477, + 167, + 489 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 477, + 167, + 489 + ], + "spans": [ + { + "bbox": [ + 67, + 477, + 167, + 489 + ], + "type": "text", + "content": "3.1 Task Definition" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 495, + 291, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 495, + 291, + 588 + ], + "spans": [ + { + "bbox": [ + 67, + 495, + 291, + 588 + ], + "type": "text", + "content": "Tool-Integrated Reasoning (TIR) is the process of incorporating external tools into the reasoning trajectory of an LLM to solve a user task. A typical TIR trajectory involves multiple tool invocations over several reasoning steps, with the final outcome determined by the cumulative success of these intermediate decisions." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 590, + 291, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 590, + 291, + 631 + ], + "spans": [ + { + "bbox": [ + 67, + 590, + 291, + 631 + ], + "type": "text", + "content": "Formally, given a tool set " + }, + { + "bbox": [ + 67, + 590, + 291, + 631 + ], + "type": "inline_equation", + "content": "\\mathcal{T} = \\{t_1,t_2,\\dots ,t_n\\}" + }, + { + "bbox": [ + 67, + 590, + 291, + 631 + ], + "type": "text", + "content": " containing " + }, + { + "bbox": [ + 67, + 590, + 291, + 631 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 67, + 590, + 291, + 631 + ], + "type": "text", + "content": " available tools, and a user query " + }, + { + "bbox": [ + 67, + 590, + 291, + 631 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 67, + 590, + 291, + 631 + ], + "type": "text", + "content": ", the reasoning trajectory up to step " + }, + { + "bbox": [ + 67, + 590, + 291, + 631 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 67, + 590, + 291, + 631 + ], + "type": "text", + "content": " is denoted as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 72, + 641, + 285, + 656 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 641, + 285, + 656 + ], + "spans": [ + { + "bbox": [ + 72, + 641, + 285, + 656 + ], + "type": "interline_equation", + "content": "s_{k} = (r_{1}, \\mathcal{T}_{1}, o_{1}), (r_{2}, \\mathcal{T}_{2}, o_{2}), \\ldots, (r_{k}, \\mathcal{T}_{k}, o_{k}),", + "image_path": "415783b912665132f653d6829cf12aed85a413ce767be1229f19c574662eae1e.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 666, + 290, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 666, + 290, + 734 + ], + "spans": [ + { + "bbox": [ + 67, + 666, + 290, + 734 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 666, + 290, + 734 + ], + "type": "inline_equation", + "content": "r_i" + }, + { + "bbox": [ + 67, + 666, + 290, + 734 + ], + "type": "text", + "content": " denotes the model's natural language reasoning at step " + }, + { + "bbox": [ + 67, + 666, + 290, + 734 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 67, + 666, + 290, + 734 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 67, + 666, + 290, + 734 + ], + "type": "inline_equation", + "content": 
"\\mathcal{T}_i \\subseteq \\mathcal{T}" + }, + { + "bbox": [ + 67, + 666, + 290, + 734 + ], + "type": "text", + "content": " denotes the set of tool calls invoked at step " + }, + { + "bbox": [ + 67, + 666, + 290, + 734 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 67, + 666, + 290, + 734 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 67, + 666, + 290, + 734 + ], + "type": "inline_equation", + "content": "o_i" + }, + { + "bbox": [ + 67, + 666, + 290, + 734 + ], + "type": "text", + "content": " denotes the observation received after executing tools in " + }, + { + "bbox": [ + 67, + 666, + 290, + 734 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_i" + }, + { + "bbox": [ + 67, + 666, + 290, + 734 + ], + "type": "text", + "content": ", possibly including both environment and user feedback." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "type": "text", + "content": "At each step " + }, + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "type": "inline_equation", + "content": "k + 1" + }, + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "type": "text", + "content": ", the model must generate the next reasoning step " + }, + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "type": "inline_equation", + "content": "r_{k + 1}" + }, + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "type": "text", + "content": ", select a set of tools " + }, + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_{k + 1} \\subseteq \\mathcal{T}" + }, + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "type": "text", + "content": ", and formulate a grounded tool call (i.e.," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 71, + 524, + 98 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 524, + 98 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 524, + 98 + ], + "type": "text", + "content": "a parameterized invocation of each tool) to make progress toward solving " + }, + { + "bbox": [ + 302, + 71, + 524, + 98 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 302, + 71, + 524, + 98 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 99, + 524, + 151 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 99, + 524, + 151 + ], + "spans": [ + { + "bbox": [ + 302, + 99, + 524, + 151 + ], + "type": "text", + "content": "The model's policy is defined as " + }, + { + "bbox": [ + 302, + 99, + 524, + 151 + ], + "type": "inline_equation", + "content": "\\pi : s_k \\to (r_{k+1}, \\mathcal{T}_{k+1})" + }, + { + "bbox": [ + 302, + 99, + 524, + 151 + ], + "type": "text", + "content": ", where the model's objective at each step is to select a tool set " + }, + { + "bbox": [ + 302, + 99, + 524, + 151 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_{k+1}" + }, + { + "bbox": [ + 302, + 99, + 524, + 151 + ], + "type": "text", + "content": " that maximizes the immediate reward:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 325, + 163, + 503, + 183 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 163, + 503, + 183 + ], + "spans": [ + { + "bbox": [ + 325, + 163, + 503, + 183 + ], + "type": "interline_equation", + "content": "\\mathcal {T} _ {k + 1} ^ {*} = \\arg \\max _ {\\mathcal {T} _ {k + 1} \\subseteq \\mathcal {T}} R (s _ {k}, \\mathcal {T} _ {k + 1}, o _ {k + 1}),", + "image_path": "529571517299c4e79a2191a1426efa9a4f090624e6416b8c93bfbb298122d7dd.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 194, + 526, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 194, + 526, + 221 + ], + "spans": [ + { + "bbox": [ + 302, + 194, + 526, + 221 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 302, + 194, + 526, + 221 + ], + "type": "inline_equation", + "content": "R(\\cdot)" + }, + { + "bbox": [ + 302, + 194, + 526, + 221 + ], + "type": "text", + "content": " represents the reward function that evaluates progress made by invoking the tools in " + }, + { + "bbox": [ + 302, + 194, + 526, + 221 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_{k + 1}" + }, + { + "bbox": [ + 302, + 194, + 526, + 221 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 222, + 525, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 222, + 525, + 275 + ], + "spans": [ + { + "bbox": [ + 302, + 222, + 525, + 275 + ], + "type": "text", + "content": "While the immediate reward at each step is maximized, the model's policy is implicitly optimized to maximize the cumulative reward over the entire trajectory, formulated as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 334, + 284, + 493, + 321 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 334, + 284, + 493, + 321 + ], + "spans": [ + { + "bbox": [ + 334, + 284, + 493, + 321 + ], + "type": "interline_equation", + "content": "\\max _ {\\pi} \\mathbb {E} _ {\\pi} \\left[ \\sum_ {k = 1} ^ {K} R \\left(s _ {k}, \\mathcal {T} _ {k + 1}, o _ {k + 1}\\right) \\right],", + "image_path": "ff7dffe6c5eb67137c3b899eeeca3f28a0e079c2d36b14468db8f3c49c223c6c.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 329, + 526, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 329, + 526, + 491 + ], + "spans": [ + { + "bbox": [ + 302, + 329, + 526, + 491 + ], + "type": "text", + "content": "This formulation is valid because our training data includes ground truth tool calls at each step, allowing step-wise reward signals to guide multi-step success. 
Unlike QA tasks that focus solely on the final answer, tool selection and application tasks provide dense intermediate feedback. Moreover, we later demonstrate that our method enables the model to generalize to settings where tool calls are free-form and only the final outcome matters. Therefore, our task setting encourages the model to optimize tool use at each step while aligning with the overall task goal." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 500, + 389, + 512 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 500, + 389, + 512 + ], + "spans": [ + { + "bbox": [ + 302, + 500, + 389, + 512 + ], + "type": "text", + "content": "3.2 TIR Rollout" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 518, + 525, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 518, + 525, + 639 + ], + "spans": [ + { + "bbox": [ + 302, + 518, + 525, + 639 + ], + "type": "text", + "content": "To enable the model to autonomously generate reasoning traces and tool calls, we utilize a system prompt as shown in Figure 4 during rollout. The Tool List placeholder denotes the tool set " + }, + { + "bbox": [ + 302, + 518, + 525, + 639 + ], + "type": "inline_equation", + "content": "\\mathcal{T}" + }, + { + "bbox": [ + 302, + 518, + 525, + 639 + ], + "type": "text", + "content": ", which contains all tools available for invocation. We indicate in the instruction that the LLM should use dedicated special tokens to mark its thoughts, tool calls, and responses in the output." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 302, + 640, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 640, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 640, + 526, + 775 + ], + "type": "text", + "content": "As illustrated in Figure 3, when the model output includes a tool-call segment, we automatically parse the tool calls into individual invocations using the model-predicted parameters. The outputs from tool executions are then inserted into the observation field and appended to the dialogue history, whose format is shown in Figure 12, serving as the model's interaction trajectory. Similarly, if the output contains a response segment, the corresponding response is parsed and appended to the dialogue history." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 69, + 68, + 526, + 229 + ], + "blocks": [ + { + "bbox": [ + 69, + 68, + 526, + 229 + ], + "lines": [ + { + "bbox": [ + 69, + 68, + 526, + 229 + ], + "spans": [ + { + "bbox": [ + 69, + 68, + 526, + 229 + ], + "type": "image", + "image_path": "68ae308fe4201a034cd5d0b2d3ae170be90697a5c0d2f6f3f221fa3f622dbd0d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 123, + 239, + 468, + 252 + ], + "lines": [ + { + "bbox": [ + 123, + 239, + 468, + 252 + ], + "spans": [ + { + "bbox": [ + 123, + 239, + 468, + 252 + ], + "type": "text", + "content": "Figure 3: Illustration of TIR rollout and calculation of format and correctness reward."
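The parse-execute-append loop described above might look like the following minimal sketch. The segment markers, `generate`, and `execute_tool` are illustrative stand-ins: the paper's prompt (Figure 4) defines its own special tokens, which were stripped in this extraction.

```python
import json
import re

# Hypothetical segment marker; not the paper's actual special tokens.
TOOL_CALL_RE = re.compile(r"<tool_call>(.*?)</tool_call>", re.DOTALL)

def rollout_turn(generate, execute_tool, history):
    """One rollout turn: generate, parse tool calls, execute, append feedback.

    `generate` samples a model output given the dialogue history;
    `execute_tool` runs one parsed call and returns its output.
    """
    output = generate(history)
    history.append({"role": "assistant", "content": output})

    calls = TOOL_CALL_RE.findall(output)
    if calls:
        # Each segment holds a JSON object {"name": ..., "parameters": {...}};
        # execution results are fed back as an observation message.
        observations = [execute_tool(json.loads(c)) for c in calls]
        history.append({"role": "tool", "content": json.dumps(observations)})
    return history
```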
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 274, + 291, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 274, + 291, + 354 + ], + "spans": [ + { + "bbox": [ + 67, + 274, + 291, + 354 + ], + "type": "text", + "content": "It is important to note that and are not mutually exclusive; they may co-occur within a single output. The user's initial query " + }, + { + "bbox": [ + 67, + 274, + 291, + 354 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 67, + 274, + 291, + 354 + ], + "type": "text", + "content": " is placed in the Initial User Input placeholder, and any subsequent user inputs are also appended to the dialogue history when present." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 365, + 167, + 379 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 365, + 167, + 379 + ], + "spans": [ + { + "bbox": [ + 67, + 365, + 167, + 379 + ], + "type": "text", + "content": "3.3 Reward Design" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 385, + 291, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 385, + 291, + 572 + ], + "spans": [ + { + "bbox": [ + 67, + 385, + 291, + 572 + ], + "type": "text", + "content": "Rule-based reward mechanisms have demonstrated strong empirical performance and are commonly employed. In our training, we similarly adopt a reward formulation that combines structural and correctness-based components, in line with prior works (Jin et al., 2025; Li et al., 2025b; Xie et al., 2025). Specifically, the format reward assesses whether the model output adheres to the expected structure including thoughts, tool calls, and responses, while the correctness reward evaluates the accuracy of tool invocations. Formally, the overall reward " + }, + { + "bbox": [ + 67, + 385, + 291, + 572 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{final}}(\\cdot)" + }, + { + "bbox": [ + 67, + 385, + 291, + 572 + ], + "type": "text", + "content": " is decomposed into two components: " + }, + { + "bbox": [ + 67, + 385, + 291, + 572 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{format}} + R_{\\mathrm{correct}}" + }, + { + "bbox": [ + 67, + 385, + 291, + 572 + ], + "type": "text", + "content": ", each described in detail below:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 584, + 290, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 584, + 290, + 638 + ], + "spans": [ + { + "bbox": [ + 67, + 584, + 290, + 638 + ], + "type": "text", + "content": "Format Reward. 
The format reward " + }, + { + "bbox": [ + 67, + 584, + 290, + 638 + ], + "type": "inline_equation", + "content": "\\mathcal{R}_{\\mathrm{format}} \\in \\{0,1\\}" + }, + { + "bbox": [ + 67, + 584, + 290, + 638 + ], + "type": "text", + "content": " checks whether the model output contains all required special tokens in the correct order as specified by the ground truth:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 661, + 264, + 709 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 661, + 264, + 709 + ], + "spans": [ + { + "bbox": [ + 67, + 661, + 264, + 709 + ], + "type": "interline_equation", + "content": "\\mathcal {R} _ {\\text {f o r m a t}} = \\left\\{ \\begin{array}{l l} 1, & \\text {i f a l l r e q u i r e d f i e l d s a p p e a r} \\\\ & \\text {a n d a r e i n t h e c o r r e c t o r d e r} \\\\ 0, & \\text {o t h e r w i s e} \\end{array} \\right.", + "image_path": "e10c00438e5de0d0dbd630290a68ceec36bd719e40a4b5c7b0aaef208c63b4b4.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 721, + 290, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 721, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 721, + 290, + 775 + ], + "type": "text", + "content": "Correctness Reward. The correctness reward " + }, + { + "bbox": [ + 67, + 721, + 290, + 775 + ], + "type": "inline_equation", + "content": "\\mathcal{R}_{\\mathrm{correct}} \\in [-3, 3]" + }, + { + "bbox": [ + 67, + 721, + 290, + 775 + ], + "type": "text", + "content": " evaluates predicted tool calls " + }, + { + "bbox": [ + 67, + 721, + 290, + 775 + ], + "type": "inline_equation", + "content": "P = \\{P_1, \\dots, P_m\\}" + }, + { + "bbox": [ + 67, + 721, + 290, + 775 + ], + "type": "text", + "content": " against ground-truth calls " + }, + { + "bbox": [ + 67, + 721, + 290, + 775 + ], + "type": "inline_equation", + "content": "G = \\{G_1, \\dots, G_n\\}" + }, + { + "bbox": [ + 67, + 721, + 290, + 775 + ], + "type": "text", + "content": ". 
It includes three components:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 304, + 274, + 411, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 274, + 411, + 286 + ], + "spans": [ + { + "bbox": [ + 304, + 274, + 411, + 286 + ], + "type": "text", + "content": "- Tool Name Matching:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 365, + 294, + 473, + 318 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 365, + 294, + 473, + 318 + ], + "spans": [ + { + "bbox": [ + 365, + 294, + 473, + 318 + ], + "type": "interline_equation", + "content": "r _ {\\text {n a m e}} = \\frac {\\left| N _ {G} \\cap N _ {P} \\right|}{\\left| N _ {G} \\cup N _ {P} \\right|} \\in [ 0, 1 ]", + "image_path": "475af02afe198697f308f929d8e923cc81513bc679eef7fd36608576594447bf.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 312, + 325, + 524, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 325, + 524, + 365 + ], + "spans": [ + { + "bbox": [ + 312, + 325, + 524, + 365 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 312, + 325, + 524, + 365 + ], + "type": "inline_equation", + "content": "N_{G}" + }, + { + "bbox": [ + 312, + 325, + 524, + 365 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 312, + 325, + 524, + 365 + ], + "type": "inline_equation", + "content": "N_{P}" + }, + { + "bbox": [ + 312, + 325, + 524, + 365 + ], + "type": "text", + "content": " are the sets of tool names extracted from the ground-truth and predicted tool calls, respectively." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 367, + 437, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 367, + 437, + 380 + ], + "spans": [ + { + "bbox": [ + 304, + 367, + 437, + 380 + ], + "type": "text", + "content": "Parameter Name Matching:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 326, + 386, + 510, + 416 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 326, + 386, + 510, + 416 + ], + "spans": [ + { + "bbox": [ + 326, + 386, + 510, + 416 + ], + "type": "interline_equation", + "content": "r _ {\\text {p a r a m}} = \\sum_ {G _ {j} \\in G} \\frac {| \\mathrm {k e y s} (P _ {G}) \\cap \\mathrm {k e y s} (P _ {P}) |}{| \\mathrm {k e y s} (P _ {G}) \\cup \\mathrm {k e y s} (P _ {P}) |} \\in [ 0, | G | ]", + "image_path": "ef560187b3bf2a0218230a0cfde3dce8ed6eed56f13f4c71a06ec6bb13c78e1a.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 312, + 424, + 525, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 424, + 525, + 465 + ], + "spans": [ + { + "bbox": [ + 312, + 424, + 525, + 465 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 312, + 424, + 525, + 465 + ], + "type": "inline_equation", + "content": "\\mathrm{keys}(P_G)" + }, + { + "bbox": [ + 312, + 424, + 525, + 465 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 312, + 424, + 525, + 465 + ], + "type": "inline_equation", + "content": "\\mathrm{keys}(P_P)" + }, + { + "bbox": [ + 312, + 424, + 525, + 465 + ], + "type": "text", + "content": " represent the parameter names of the predicted and ground-truth tool calls, respectively." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 466, + 446, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 466, + 446, + 479 + ], + "spans": [ + { + "bbox": [ + 304, + 466, + 446, + 479 + ], + "type": "text", + "content": "Parameter Content Matching:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 337, + 486, + 499, + 540 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 486, + 499, + 540 + ], + "spans": [ + { + "bbox": [ + 337, + 486, + 499, + 540 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} r _ {\\text {v a l u e}} = \\sum_ {G _ {j} \\in G} \\sum_ {k \\in \\text {k e y s} (G _ {j})} \\mathbb {1} \\left[ P _ {G} [ k ] = P _ {P} [ k ] \\right] \\\\ \\in [ 0, \\sum_ {G _ {j} \\in G} | \\mathrm {k e y s} (G _ {j}) | ] \\\\ \\end{array}", + "image_path": "2cb5879e2c3a15c5bee23b68da6e3da2bed3470c8b34ef0ea35c0e16dd02548c.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 312, + 548, + 524, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 548, + 524, + 587 + ], + "spans": [ + { + "bbox": [ + 312, + 548, + 524, + 587 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 312, + 548, + 524, + 587 + ], + "type": "inline_equation", + "content": "P_{G}[k]" + }, + { + "bbox": [ + 312, + 548, + 524, + 587 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 312, + 548, + 524, + 587 + ], + "type": "inline_equation", + "content": "P_{P}[k]" + }, + { + "bbox": [ + 312, + 548, + 524, + 587 + ], + "type": "text", + "content": " represent the values of the parameters for the predicted and ground truth tool calls." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 590, + 473, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 590, + 473, + 602 + ], + "spans": [ + { + "bbox": [ + 304, + 590, + 473, + 602 + ], + "type": "text", + "content": "- Total match score for each match is:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 340, + 611, + 497, + 624 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 340, + 611, + 497, + 624 + ], + "spans": [ + { + "bbox": [ + 340, + 611, + 497, + 624 + ], + "type": "interline_equation", + "content": "r _ {\\text {m a t c h}} = r _ {\\text {n a m e}} + r _ {\\text {p a r a m}} + r _ {\\text {v a l u e}} \\in [ 0, S _ {\\max} ]", + "image_path": "6a107517ff3b41916528d7c3398cdec725ecbb30988ad47dfd5b591224becce7.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 632, + 524, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 632, + 524, + 660 + ], + "spans": [ + { + "bbox": [ + 313, + 632, + 524, + 660 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 632, + 524, + 660 + ], + "type": "inline_equation", + "content": "S_{\\mathrm{max}} = 1 + |G| + \\sum_{G_j \\in G} |\\mathrm{keys}(G_j)|" + }, + { + "bbox": [ + 313, + 632, + 524, + 660 + ], + "type": "text", + "content": " denotes the maximum possible score." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 302, + 668, + 524, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 668, + 524, + 707 + ], + "spans": [ + { + "bbox": [ + 302, + 668, + 524, + 707 + ], + "type": "text", + "content": "The total score is computed by finding the optimal matching between " + }, + { + "bbox": [ + 302, + 668, + 524, + 707 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 302, + 668, + 524, + 707 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 668, + 524, + 707 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 302, + 668, + 524, + 707 + ], + "type": "text", + "content": " to maximize the total match score:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 337, + 714, + 490, + 741 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 714, + 490, + 741 + ], + "spans": [ + { + "bbox": [ + 337, + 714, + 490, + 741 + ], + "type": "interline_equation", + "content": "\\mathcal {R} _ {\\text {c o r r e c t}} = 6 \\cdot \\frac {R _ {\\max}}{S _ {\\max}} - 3 \\in [ - 3, 3 ]", + "image_path": "8878db23730494c3a7f9edc77ec5c687ed034f8797c7ff815bf34796e19a9816.jpg" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 302, + 748, + 524, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 748, + 524, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 748, + 524, + 775 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 302, + 748, + 524, + 775 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{max}}" + }, + { + "bbox": [ + 302, + 748, + 524, + 775 + ], + "type": "text", + "content": " denotes the total match score from the optimal matching. The final correctness reward" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 117, + 72, + 223, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 72, + 223, + 83 + ], + "spans": [ + { + "bbox": [ + 117, + 72, + 223, + 83 + ], + "type": "text", + "content": "System Prompt for Training" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 89, + 476, + 112 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 89, + 476, + 112 + ], + "spans": [ + { + "bbox": [ + 116, + 89, + 476, + 112 + ], + "type": "text", + "content": "You are a helpful dialogue assistant capable of leveraging tool calls to solve user tasks and provide structured chat responses." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 117, + 123, + 180, + 133 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 123, + 180, + 133 + ], + "spans": [ + { + "bbox": [ + 117, + 123, + 180, + 133 + ], + "type": "text", + "content": "Available Tools" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 117, + 135, + 303, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 135, + 303, + 146 + ], + "spans": [ + { + "bbox": [ + 117, + 135, + 303, + 146 + ], + "type": "text", + "content": "In your response, you can use the following tools:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 117, + 147, + 168, + 158 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 147, + 168, + 158 + ], + "spans": [ + { + "bbox": [ + 117, + 147, + 168, + 158 + ], + "type": "text", + "content": "{{ToolList}}" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 117, + 169, + 198, + 179 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 169, + 198, + 179 + ], + "spans": [ + { + "bbox": [ + 117, + 169, + 198, + 179 + ], + "type": "text", + "content": "Steps for Each Turn" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 117, + 180, + 476, + 226 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 117, + 180, + 368, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 180, + 368, + 191 + ], + "spans": [ + { + "bbox": [ + 117, + 180, + 368, + 191 + ], + "type": "text", + "content": "1. Think: Recall relevant context and analyze the current user goal." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 117, + 192, + 416, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 192, + 416, + 203 + ], + "spans": [ + { + "bbox": [ + 117, + 192, + 416, + 203 + ], + "type": "text", + "content": "2. Decide on Tool Usage: If a tool is needed, specify the tool and its parameters." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 117, + 203, + 476, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 203, + 476, + 226 + ], + "spans": [ + { + "bbox": [ + 117, + 203, + 476, + 226 + ], + "type": "text", + "content": "3. Respond Appropriately: If a response is needed, generate one while maintaining consistency across user queries." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 117, + 237, + 180, + 248 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 237, + 180, + 248 + ], + "spans": [ + { + "bbox": [ + 117, + 237, + 180, + 248 + ], + "type": "text", + "content": "Output Format" + } + ] + } + ], + "index": 10 + }, + { + "type": "code", + "bbox": [ + 116, + 248, + 476, + 328 + ], + "blocks": [ + { + "bbox": [ + 116, + 248, + 476, + 328 + ], + "lines": [ + { + "bbox": [ + 116, + 248, + 476, + 328 + ], + "spans": [ + { + "bbox": [ + 116, + 248, + 476, + 328 + ], + "type": "text", + "content": " Your thoughts and reasoning \n \n{“name”: “Tool name”, “parameters”: {“Parameter name”: “Parameter content”, “... ...”: “... ...”} \n{“name”: “... ...”, “parameters”: {“... ...”: “... ...”, “... ...”: “... ...”} \n... 
\n \n AI's final response " + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "code_body" + } + ], + "index": 11, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 117, + 339, + 185, + 350 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 339, + 185, + 350 + ], + "spans": [ + { + "bbox": [ + 117, + 339, + 185, + 350 + ], + "type": "text", + "content": "Important Notes" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 116, + 351, + 477, + 442 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 116, + 351, + 477, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 351, + 477, + 384 + ], + "spans": [ + { + "bbox": [ + 116, + 351, + 477, + 384 + ], + "type": "text", + "content": "1. You must always include the field to outline your reasoning. Provide at least one of or . Decide whether to use (possibly multiple times), , or both." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 116, + 385, + 477, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 385, + 477, + 419 + ], + "spans": [ + { + "bbox": [ + 116, + 385, + 477, + 419 + ], + "type": "text", + "content": "2. You can invoke multiple tool calls simultaneously in the fields. Each tool call should be a JSON object with a \"name\" field and a \"parameters\" field containing a dictionary of parameters. If no parameters are needed, leave the \"parameters\" field an empty dictionary." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 116, + 419, + 477, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 419, + 477, + 442 + ], + "spans": [ + { + "bbox": [ + 116, + 419, + 477, + 442 + ], + "type": "text", + "content": "3. Refer to the previous dialogue records in the history, including the user's queries, previous , , and any tool feedback noted as (if exists)." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 190, + 467, + 401, + 480 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 467, + 401, + 480 + ], + "spans": [ + { + "bbox": [ + 190, + 467, + 401, + 480 + ], + "type": "text", + "content": "Figure 4: The system prompt used for TIR's rollout." + } + ] + } + ], + "index": 17, + "type": "text" + }, + { + "bbox": [ + 67, + 502, + 290, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 502, + 290, + 555 + ], + "spans": [ + { + "bbox": [ + 67, + 502, + 290, + 555 + ], + "type": "inline_equation", + "content": "\\mathcal{R}_{\\mathrm{correct}}" + }, + { + "bbox": [ + 67, + 502, + 290, + 555 + ], + "type": "text", + "content": " is the normalized reward for the matching process. We empirically set the reward scale within the range of " + }, + { + "bbox": [ + 67, + 502, + 290, + 555 + ], + "type": "inline_equation", + "content": "[-3, 3]" + }, + { + "bbox": [ + 67, + 502, + 290, + 555 + ], + "type": "text", + "content": ", with more analysis and ablations of reward scale presented in Section 5." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 67, + 555, + 289, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 555, + 289, + 581 + ], + "spans": [ + { + "bbox": [ + 67, + 555, + 289, + 581 + ], + "type": "text", + "content": "The final reward value " + }, + { + "bbox": [ + 67, + 555, + 289, + 581 + ], + "type": "inline_equation", + "content": "\\mathcal{R}_{\\mathrm{final}}" + }, + { + "bbox": [ + 67, + 555, + 289, + 581 + ], + "type": "text", + "content": " is finally derived as the sum of " + }, + { + "bbox": [ + 67, + 555, + 289, + 581 + ], + "type": "inline_equation", + "content": "\\mathcal{R}_{\\mathrm{format}}" + }, + { + "bbox": [ + 67, + 555, + 289, + 581 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 555, + 289, + 581 + ], + "type": "inline_equation", + "content": "\\mathcal{R}_{\\mathrm{correct}}" + }, + { + "bbox": [ + 67, + 555, + 289, + 581 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 98, + 590, + 259, + 605 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 98, + 590, + 259, + 605 + ], + "spans": [ + { + "bbox": [ + 98, + 590, + 259, + 605 + ], + "type": "interline_equation", + "content": "\\mathcal {R} _ {\\text {f i n a l}} = \\mathcal {R} _ {\\text {f o r m a t}} + \\mathcal {R} _ {\\text {c o r r e c t}} \\in [ - 3, 4 ]", + "image_path": "e4449cb8b28b2188f9d5261e72497785c4a9d27afaba691623bb1707a942ed67.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 67, + 613, + 290, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 613, + 290, + 747 + ], + "spans": [ + { + "bbox": [ + 67, + 613, + 290, + 747 + ], + "type": "text", + "content": "Unlike prior works that often rely on binary or overly simplified reward signals, our design captures the nuanced structure of tool calls by evaluating multiple interdependent components including tool names, parameter schemas, and parameter values. This fine-grained formulation better reflects the complexity of real-world tool use, where correctness cannot be reduced to a single binary criterion. We further validate the impact of this design through comprehensive analysis in Section 5." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 67, + 748, + 290, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 748, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 748, + 290, + 775 + ], + "type": "text", + "content": "Overall, our reward design ensures a balanced and interpretable evaluation signal by explicitly" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 302, + 502, + 526, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 502, + 526, + 594 + ], + "spans": [ + { + "bbox": [ + 302, + 502, + 526, + 594 + ], + "type": "text", + "content": "separating structural compliance from semantic correctness. By aligning rewards with both format adherence and fine-grained tool call accuracy, the model is guided to produce outputs that are not only syntactically valid but also semantically faithful, which is crucial for downstream tool execution and final task success." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 302, + 613, + 448, + 627 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 613, + 448, + 627 + ], + "spans": [ + { + "bbox": [ + 302, + 613, + 448, + 627 + ], + "type": "text", + "content": "3.4 RL Training with GRPO" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 302, + 637, + 526, + 718 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 637, + 526, + 718 + ], + "spans": [ + { + "bbox": [ + 302, + 637, + 526, + 718 + ], + "type": "text", + "content": "To tune the model with structured rewards, we employ GRPO, a variant of PPO that introduces advantage normalization within grouped samples. This normalization helps stabilize training by reducing variance across samples that share a common input context. Let " + }, + { + "bbox": [ + 302, + 637, + 526, + 718 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 302, + 637, + 526, + 718 + ], + "type": "text", + "content": " represent the current policy." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 302, + 735, + 526, + 748 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 735, + 526, + 748 + ], + "spans": [ + { + "bbox": [ + 302, + 735, + 526, + 748 + ], + "type": "text", + "content": "Normalized Advantage Across Query Groups." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 302, + 749, + 525, + 776 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 749, + 525, + 776 + ], + "spans": [ + { + "bbox": [ + 302, + 749, + 525, + 776 + ], + "type": "text", + "content": "For each query " + }, + { + "bbox": [ + 302, + 749, + 525, + 776 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 302, + 749, + 525, + 776 + ], + "type": "text", + "content": ", its responses derived from the rollout form a group " + }, + { + "bbox": [ + 302, + 749, + 525, + 776 + ], + "type": "inline_equation", + "content": "G_{Q}" + }, + { + "bbox": [ + 302, + 749, + 525, + 776 + ], + "type": "text", + "content": " consisting of multiple" + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 286, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 286, + 84 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 286, + 84 + ], + "type": "text", + "content": "responses and their corresponding reward values:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 85, + 97, + 271, + 111 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 97, + 271, + 111 + ], + "spans": [ + { + "bbox": [ + 85, + 97, + 271, + 111 + ], + "type": "interline_equation", + "content": "G _ {Q} = \\left\\{A, \\left(s _ {1}, r _ {1}\\right), \\left(s _ {2}, r _ {2}\\right), \\dots , \\left(s _ {n}, r _ {n}\\right) \\right\\}", + "image_path": "5932a8fbca056a1ca11fa22120336d650ac8a48814c7c0bc73f5f6a45fc56fb2.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 124, + 291, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 124, + 291, + 204 + ], + "spans": [ + { + "bbox": [ + 67, + 124, + 291, + 204 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 124, + 291, + 204 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 67, + 124, + 291, + 204 + ], + "type": "text", + "content": " denotes the ground-truth annotation for " + 
}, + { + "bbox": [ + 67, + 124, + 291, + 204 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 67, + 124, + 291, + 204 + ], + "type": "text", + "content": ", and each reward " + }, + { + "bbox": [ + 67, + 124, + 291, + 204 + ], + "type": "inline_equation", + "content": "r_i" + }, + { + "bbox": [ + 67, + 124, + 291, + 204 + ], + "type": "text", + "content": " is computed as the sum of the format and correctness rewards associated with response " + }, + { + "bbox": [ + 67, + 124, + 291, + 204 + ], + "type": "inline_equation", + "content": "s_i" + }, + { + "bbox": [ + 67, + 124, + 291, + 204 + ], + "type": "text", + "content": ", i.e., " + }, + { + "bbox": [ + 67, + 124, + 291, + 204 + ], + "type": "inline_equation", + "content": "r_i = \\mathcal{R}_{\\mathrm{format}}(s_i, A) + \\mathcal{R}_{\\mathrm{correct}}(s_i, A)" + }, + { + "bbox": [ + 67, + 124, + 291, + 204 + ], + "type": "text", + "content": ". For each group, we calculate the mean and standard deviation of the rewards:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 93, + 225, + 264, + 259 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 225, + 264, + 259 + ], + "spans": [ + { + "bbox": [ + 93, + 225, + 264, + 259 + ], + "type": "interline_equation", + "content": "\\mu_ {Q} = \\frac {1}{n} \\sum_ {i = 1} ^ {n} r _ {i}, \\quad \\sigma_ {Q} = \\sqrt {\\frac {1}{n} \\sum_ {i = 1} ^ {n} (r _ {i} - \\mu_ {Q}) ^ {2}}", + "image_path": "3b638561a7bdff312708991620c15b142a3c0bd5ee01aaa71094fb8b17e8fa85.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 270, + 289, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 270, + 289, + 298 + ], + "spans": [ + { + "bbox": [ + 67, + 270, + 289, + 298 + ], + "type": "text", + "content": "Then, for each sample " + }, + { + "bbox": [ + 67, + 270, + 289, + 298 + ], + "type": "inline_equation", + "content": "s_i" + }, + { + "bbox": [ + 67, + 270, + 289, + 298 + ], + "type": "text", + "content": " in the group, we define the normalized advantage:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 138, + 306, + 220, + 328 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 306, + 220, + 328 + ], + "spans": [ + { + "bbox": [ + 138, + 306, + 220, + 328 + ], + "type": "interline_equation", + "content": "A _ {i} (s _ {i} | Q) = \\frac {r _ {i} - \\mu_ {Q}}{\\sigma_ {Q} + \\eta}", + "image_path": "ee4a6b609ba9897a35b08c96752b60d3ac344c8eea44066580b6b3d697518e76.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 334, + 279, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 334, + 279, + 347 + ], + "spans": [ + { + "bbox": [ + 70, + 334, + 279, + 347 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 70, + 334, + 279, + 347 + ], + "type": "inline_equation", + "content": "\\eta" + }, + { + "bbox": [ + 70, + 334, + 279, + 347 + ], + "type": "text", + "content": " is a constant to avoid division by zero." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 357, + 291, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 357, + 291, + 411 + ], + "spans": [ + { + "bbox": [ + 67, + 357, + 291, + 411 + ], + "type": "text", + "content": "Policy Optimization Objective. 
The policy " + }, + { + "bbox": [ + 67, + 357, + 291, + 411 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 67, + 357, + 291, + 411 + ], + "type": "text", + "content": " is optimized using the standard clipped PPO objective, adapted with our group-wise normalized advantages:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 76, + 430, + 281, + 478 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 430, + 281, + 478 + ], + "spans": [ + { + "bbox": [ + 76, + 430, + 281, + 478 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} J _ {\\mathrm {G R P O}} (\\theta) = \\mathbb {E} _ {Q \\sim \\mathcal {D}} \\mathbb {E} _ {s _ {i} \\sim \\pi_ {\\theta}} \\left[ \\min \\left(\\frac {\\pi_ {\\theta} (s _ {i} | Q)}{\\pi_ {\\mathrm {o l d}} (s _ {i} | Q)} A _ {i} (s _ {i} | Q), \\right. \\right. \\\\ \\left. \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta} (s _ {i} | Q)}{\\pi_ {\\mathrm {o l d}} (s _ {i} | Q)}, 1 - \\epsilon , 1 + \\epsilon\\right) A _ {i} (s _ {i} | Q)\\right) \\Bigg ] \\\\ \\end{array}", + "image_path": "81f9b6e8e4a7a0542da24967179dbca4282e35b72d41ed6cabe381d7cacdbab0.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 489, + 290, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 489, + 290, + 597 + ], + "spans": [ + { + "bbox": [ + 67, + 489, + 290, + 597 + ], + "type": "text", + "content": "Unlike the original GRPO formulations, we omit the KL penalty term against a reference model. This design choice encourages the model to more freely adapt its behavior to our custom response format and structured reward signals. In practice, we observe that this leads to faster convergence and comparable performance, while also simplifying the training pipeline." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 598, + 290, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 598, + 290, + 680 + ], + "spans": [ + { + "bbox": [ + 67, + 598, + 290, + 680 + ], + "type": "text", + "content": "Overall, this objective guides the policy to generate structurally consistent and semantically accurate tool calls, while group-wise normalization mitigates reward variance across queries, leading to more stable and sample-efficient alignment with task-specific response requirements." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 692, + 155, + 705 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 692, + 155, + 705 + ], + "spans": [ + { + "bbox": [ + 67, + 692, + 155, + 705 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 716, + 174, + 729 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 716, + 174, + 729 + ], + "spans": [ + { + "bbox": [ + 67, + 716, + 174, + 729 + ], + "type": "text", + "content": "4.1 Training Dataset" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 735, + 290, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 735, + 290, + 774 + ], + "spans": [ + { + "bbox": [ + 67, + 735, + 290, + 774 + ], + "type": "text", + "content": "To support robust tool learning through RL, we construct a mixed dataset spanning diverse tool use scenarios:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 71, + 525, + 278 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 304, + 71, + 525, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 71, + 525, + 125 + ], + "spans": [ + { + "bbox": [ + 304, + 71, + 525, + 125 + ], + "type": "text", + "content": "- ToolACE (Liu et al., 2024): A general tool use dataset where the model learns when to invoke tools versus respond directly, improving decision-making in multi-step interactions." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 127, + 525, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 127, + 525, + 206 + ], + "spans": [ + { + "bbox": [ + 304, + 127, + 525, + 206 + ], + "type": "text", + "content": "- Hammer (Masked) (Lin et al., 2024): A subset of Hammer with randomized tool and parameter names, forcing the model to rely on descriptions rather than memorized labels, thus enhancing generalization and reducing overfitting to certain tools." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 211, + 524, + 278 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 211, + 524, + 278 + ], + "spans": [ + { + "bbox": [ + 304, + 211, + 524, + 278 + ], + "type": "text", + "content": "- xLAM (Zhang et al., 2024): A compositional dataset requiring one or multiple tool calls per turn, encouraging the model to reason about tool dependencies and plan diverse tool calling action actively." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 302, + 281, + 525, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 281, + 525, + 430 + ], + "spans": [ + { + "bbox": [ + 302, + 281, + 525, + 430 + ], + "type": "text", + "content": "For RL training, we sample 2K examples from ToolACE and 1K each from Hammer and xLAM, creating a balanced dataset spanning diverse levels of complexity and tool use. Multi-step trajectories are decomposed into single-step instances, with prior dialogue history injected into the user prompt (as shown in Figure 12) to preserve context. This setup encourages strategic exploration and teaches the model to select and apply tools appropriately within each step. Please see Appendix B for more details and justifications." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 302, + 439, + 427, + 451 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 439, + 427, + 451 + ], + "spans": [ + { + "bbox": [ + 302, + 439, + 427, + 451 + ], + "type": "text", + "content": "4.2 Experiment Settings" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 302, + 455, + 525, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 455, + 525, + 631 + ], + "spans": [ + { + "bbox": [ + 302, + 455, + 525, + 631 + ], + "type": "text", + "content": "Training. We conduct all RL experiments using the veRL framework (Sheng et al., 2024), adopting the GRPO algorithm detailed in the previous section. For each training step, we sample a batch of 512, and generate 4 responses per query, training for 15 epochs in total (see Appendix B for full configuration details). To encourage broader policy exploration, we remove KL regularization and apply a generation temperature of 1.0. We initialize our models with the Qwen-2.5-Instruct (Team, 2024) and Llama-3.2-Instruct (Dubey et al., 2024) series, which are further tuned under the GRPO objective with our customized reward design." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 302, + 638, + 526, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 638, + 526, + 746 + ], + "spans": [ + { + "bbox": [ + 302, + 638, + 526, + 746 + ], + "type": "text", + "content": "Evaluation. We evaluate our approach on the Berkeley Function Call Leaderboard (BFCL) (Patil et al., 2024), a comprehensive benchmark that spans a diverse set of challenges, including single-step reasoning, multi-step tool use, real-time execution, irrelevant tool rejection, simultaneous multi-tool selection, and multi-tool application2. In addition, we present results on API-" + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 752, + 506, + 773 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 752, + 506, + 773 + ], + "spans": [ + { + "bbox": [ + 302, + 752, + 506, + 773 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 302, + 752, + 506, + 773 + ], + "type": "text", + "content": "https://gorilla.cs.berkeley.edu/blogs/13_bfcl_v3-multi_turn.html" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 291, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 291, + 248 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 291, + 248 + ], + "type": "text", + "content": "Bank (Li et al., 2023), a three-level evaluation framework comprising 73 diverse and complex API tools. It assesses an LLM's ability to select and apply tools through natural multi-turn dialogues, across three levels of difficulty. We also evaluate on a representative QA benchmark Bamboogle (Press et al., 2022), which comprises a variety of question-answering tasks where performance is measured based on the final answer accuracy rather than the correctness of tool use. These broad coverage makes our evaluation setting effective for evaluating real-world LLM tool use proficiency. All results are reported in terms of accuracy." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 254, + 291, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 254, + 291, + 553 + ], + "spans": [ + { + "bbox": [ + 67, + 254, + 291, + 553 + ], + "type": "text", + "content": "Baselines. We compare our approach against several baselines to better isolate the effects of GRPO training: (1) Raw Instruct Model: the original model without any additional fine-tuning or RL, evaluated using the same prompts. (2) SFT on RL Data: the instruct model fine-tuned using the same 4K / selected 400 data points as the RL training set, providing a comparison point to assess whether GRPO training outperforms standard SFT. (3) GRPO on SFT Model: GRPO is applied to a model that has already undergone SFT on the selected 400 data points. This setup allows us to evaluate the impact of initializing GRPO with a format-aware model, in contrast to starting from the raw instruct model in a cold start manner. (4) PPO: We also include the standard PPO setting as a baseline to evaluate whether our reward design is effective beyond GRPO. We report results for both a cold start PPO model and a PPO model initialized with SFT, using the same hyperparameters as in the GRPO setup for a fair comparison. Please refer to Appendix B for more details and justifications." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 560, + 130, + 573 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 560, + 130, + 573 + ], + "spans": [ + { + "bbox": [ + 67, + 560, + 130, + 573 + ], + "type": "text", + "content": "4.3 Results" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 578, + 291, + 726 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 578, + 291, + 726 + ], + "spans": [ + { + "bbox": [ + 67, + 578, + 291, + 726 + ], + "type": "text", + "content": "Main Results. We report BFCL and API-Bank results in Table 1 and Table 2, respectively. Our GRPO method, trained from scratch on the Qwen2.5-Instruct series, generally outperforms other baselines, achieving " + }, + { + "bbox": [ + 67, + 578, + 291, + 726 + ], + "type": "inline_equation", + "content": "\\tilde{10}\\%" + }, + { + "bbox": [ + 67, + 578, + 291, + 726 + ], + "type": "text", + "content": " absolute gains over SFT trained on the same data volume. In contrast, LLaMA-3.2-Instruct shows less improvement, possibly due to the model's lower adaptability to GRPO-style generalization. Nevertheless, it remains competitive and outperforms most baselines on API-Bank." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 735, + 291, + 775 + ], + "type": "text", + "content": "SFT Initialization Impacts. 
Interestingly, GRPO also improves models initialized with limited SFT, often outperforming full-scale SFT" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 308, + 74, + 411, + 150 + ], + "blocks": [ + { + "bbox": [ + 308, + 74, + 411, + 150 + ], + "lines": [ + { + "bbox": [ + 308, + 74, + 411, + 150 + ], + "spans": [ + { + "bbox": [ + 308, + 74, + 411, + 150 + ], + "type": "image", + "image_path": "d8dc3638bde2b89fa52a2572854cae623f195a7bdeaf5d9b387e540c7b3b470f.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 324, + 156, + 395, + 166 + ], + "lines": [ + { + "bbox": [ + 324, + 156, + 395, + 166 + ], + "spans": [ + { + "bbox": [ + 324, + 156, + 395, + 166 + ], + "type": "text", + "content": "(a) Format Reward" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 418, + 74, + 521, + 150 + ], + "blocks": [ + { + "bbox": [ + 418, + 74, + 521, + 150 + ], + "lines": [ + { + "bbox": [ + 418, + 74, + 521, + 150 + ], + "spans": [ + { + "bbox": [ + 418, + 74, + 521, + 150 + ], + "type": "image", + "image_path": "86ab19a0b1d508548569e0402244154029f504b76c0052ae772036ac379e63d7.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 426, + 156, + 513, + 166 + ], + "lines": [ + { + "bbox": [ + 426, + 156, + 513, + 166 + ], + "spans": [ + { + "bbox": [ + 426, + 156, + 513, + 166 + ], + "type": "text", + "content": "(b) Correctness Reward" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 307, + 234, + 411, + 310 + ], + "blocks": [ + { + "bbox": [ + 302, + 179, + 525, + 216 + ], + "lines": [ + { + "bbox": [ + 302, + 179, + 525, + 216 + ], + "spans": [ + { + "bbox": [ + 302, + 179, + 525, + 216 + ], + "type": "text", + "content": "Figure 5: Format (left) and correctness (right) reward trends across training steps for Qwen2.5-3B-Instruct with different model initialization strategies." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 307, + 234, + 411, + 310 + ], + "lines": [ + { + "bbox": [ + 307, + 234, + 411, + 310 + ], + "spans": [ + { + "bbox": [ + 307, + 234, + 411, + 310 + ], + "type": "image", + "image_path": "7ac0f46c2122b5016b913482eafa9455530e12499baa0d34caae98e0d8b3c074.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 324, + 316, + 394, + 326 + ], + "lines": [ + { + "bbox": [ + 324, + 316, + 394, + 326 + ], + "spans": [ + { + "bbox": [ + 324, + 316, + 394, + 326 + ], + "type": "text", + "content": "(a) Format Reward" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 302, + 339, + 525, + 376 + ], + "lines": [ + { + "bbox": [ + 302, + 339, + 525, + 376 + ], + "spans": [ + { + "bbox": [ + 302, + 339, + 525, + 376 + ], + "type": "text", + "content": "Figure 6: Format (left) and correctness (right) reward trends across training steps for Qwen2.5-3B-Instruct with different RL strategies (GRPO vs. PPO)."
+ } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 418, + 234, + 521, + 310 + ], + "blocks": [ + { + "bbox": [ + 418, + 234, + 521, + 310 + ], + "lines": [ + { + "bbox": [ + 418, + 234, + 521, + 310 + ], + "spans": [ + { + "bbox": [ + 418, + 234, + 521, + 310 + ], + "type": "image", + "image_path": "94ced1cd69372f6e86341575a504e8b4cc1f2bf372c3c145a6fdae3920f6dcdb.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 426, + 316, + 513, + 326 + ], + "lines": [ + { + "bbox": [ + 426, + 316, + 513, + 326 + ], + "spans": [ + { + "bbox": [ + 426, + 316, + 513, + 326 + ], + "type": "text", + "content": "(b) Correctness Reward" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 301, + 399, + 526, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 399, + 526, + 561 + ], + "spans": [ + { + "bbox": [ + 301, + 399, + 526, + 561 + ], + "type": "text", + "content": "trained on 10 times more data. However, this setup still underperforms compared to cold start GRPO. We hypothesize that SFT initialization leads to memorization and overfitting, which reduces the impact of GRPO's effectiveness in generalization. As shown in Figure 5, SFT-initialized models achieve higher training rewards due to distributional alignment between SFT and RL data, but empirically generalize worse on the two benchmarks. This further highlights that higher training rewards do not necessarily translate to better generalization." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 571, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 571, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 571, + 526, + 775 + ], + "type": "text", + "content": "Reward Design on PPO. We also evaluate PPO under both cold start and SFT-initialized settings to examine the effectiveness of our reward design. The results show that while PPO with a cold start can outperform SFT in some cases, it tends to be less stable across different model settings. In contrast, GRPO consistently achieves higher rewards even from a cold start, suggesting that our reward design is partially effective for PPO but works best in the GRPO framework. As shown in Figure 6, GRPO not only achieves higher correctness rewards but also gains format rewards more rapidly during training. Interestingly, PPO benefits from SFT initialization, generally yielding better results than a cold start, whereas GRPO performs better" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 71, + 68, + 523, + 298 + ], + "blocks": [ + { + "bbox": [ + 71, + 68, + 523, + 298 + ], + "lines": [ + { + "bbox": [ + 71, + 68, + 523, + 298 + ], + "spans": [ + { + "bbox": [ + 71, + 68, + 523, + 298 + ], + "type": "table", + "html": "
<table><tr><td>Model</td><td>Overall Acc</td><td>Non-Live AST Acc</td><td>Non-Live Exec Acc</td><td>Live Acc</td><td>Multi Turn Acc</td><td>Relevance Detection</td><td>Irrelevance Detection</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (Raw)</td><td>19.41%</td><td>16.00%</td><td>13.18%</td><td>35.58%</td><td>0.00%</td><td>44.44%</td><td>82.49%</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (SFT400)</td><td>40.21%</td><td>65.12%</td><td>61.11%</td><td>56.69%</td><td>1.00%</td><td>94.44%</td><td>60.14%</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (SFT4k)</td><td>40.67%</td><td>59.94%</td><td>59.84%</td><td>59.31%</td><td>1.00%</td><td>88.89%</td><td>71.34%</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (SFT400+PPO)</td><td>42.95%</td><td>77.65%</td><td>69.75%</td><td>55.73%</td><td>1.88%</td><td>100.00%</td><td>48.40%</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (SFT400+GRPO)</td><td>40.93%</td><td>70.54%</td><td>60.79%</td><td>56.33%</td><td>1.00%</td><td>94.44%</td><td>58.63%</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (PPO Cold Start)</td><td>38.32%</td><td>79.40%</td><td>70.11%</td><td>45.24%</td><td>0.87%</td><td>100.00%</td><td>18.09%</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (Ours, GRPO Cold Start)</td><td>46.20%</td><td>77.96%</td><td>76.98%</td><td>60.73%</td><td>2.25%</td><td>100.00%</td><td>56.44%</td></tr>
<tr><td>Qwen2.5-3B-Instruct (Raw)</td><td>33.04%</td><td>42.52%</td><td>40.80%</td><td>53.96%</td><td>1.00%</td><td>64.71%</td><td>56.01%</td></tr>
<tr><td>Qwen2.5-3B-Instruct (SFT400)</td><td>34.08%</td><td>69.29%</td><td>61.50%</td><td>41.40%</td><td>0.00%</td><td>94.44%</td><td>8.11%</td></tr>
<tr><td>Qwen2.5-3B-Instruct (SFT4k)</td><td>41.97%</td><td>62.85%</td><td>54.73%</td><td>59.17%</td><td>0.75%</td><td>77.78%</td><td>75.12%</td></tr>
<tr><td>Qwen2.5-3B-Instruct (SFT400+PPO)</td><td>45.80%</td><td>78.29%</td><td>71.09%</td><td>58.76%</td><td>5.12%</td><td>94.12%</td><td>54.70%</td></tr>
<tr><td>Qwen2.5-3B-Instruct (SFT400+GRPO)</td><td>46.42%</td><td>76.21%</td><td>68.93%</td><td>64.15%</td><td>1.75%</td><td>88.89%</td><td>71.76%</td></tr>
<tr><td>Qwen2.5-3B-Instruct (PPO Cold Start)</td><td>51.15%</td><td>82.42%</td><td>78.52%</td><td>67.78%</td><td>4.88%</td><td>94.12%</td><td>73.87%</td></tr>
<tr><td>Qwen2.5-3B-Instruct (Ours, GRPO Cold Start)</td><td>52.98%</td><td>81.58%</td><td>79.43%</td><td>73.78%</td><td>3.75%</td><td>88.24%</td><td>84.85%</td></tr>
<tr><td>Qwen2.5-7B-Instruct (Raw)</td><td>41.97%</td><td>66.02%</td><td>70.11%</td><td>53.51%</td><td>4.25%</td><td>76.47%</td><td>62.66%</td></tr>
<tr><td>Qwen2.5-7B-Instruct (SFT400)</td><td>34.08%</td><td>69.29%</td><td>66.68%</td><td>41.4%</td><td>0.00%</td><td>94.44%</td><td>8.11%</td></tr>
<tr><td>Qwen2.5-7B-Instruct (SFT4k)</td><td>36.53%</td><td>45.15%</td><td>53.5%</td><td>57.13%</td><td>0.75%</td><td>72.22%</td><td>72.32%</td></tr>
<tr><td>Qwen2.5-7B-Instruct (SFT400+PPO)</td><td>42.02%</td><td>83.90%</td><td>72.62%</td><td>51.84%</td><td>0.25%</td><td>100.00%</td><td>29.66%</td></tr>
<tr><td>Qwen2.5-7B-Instruct (SFT400+GRPO)</td><td>39.25%</td><td>80.69%</td><td>74.34%</td><td>46.51%</td><td>0.25%</td><td>100.00%</td><td>14.19%</td></tr>
<tr><td>Qwen2.5-7B-Instruct (PPO Cold Start)</td><td>46.68%</td><td>79.33%</td><td>78.16%</td><td>63.17%</td><td>0.38%</td><td>88.89%</td><td>52.92%</td></tr>
<tr><td>Qwen2.5-7B-Instruct (Ours, GRPO Cold Start)</td><td>58.38%</td><td>86.17%</td><td>78.25%</td><td>74.9%</td><td>18.12%</td><td>83.33%</td><td>76.68%</td></tr>
<tr><td>Llama-3.2-3B-Instruct (Raw)</td><td>22.09%</td><td>17.44%</td><td>14.57%</td><td>43.85%</td><td>0.00%</td><td>77.78%</td><td>66.07%</td></tr>
<tr><td>Llama-3.2-3B-Instruct (SFT400)</td><td>41.22%</td><td>64.27%</td><td>62.18%</td><td>58.37%</td><td>0.75%</td><td>66.67%</td><td>71.12%</td></tr>
<tr><td>Llama-3.2-3B-Instruct (SFT4k)</td><td>44.16%</td><td>65.42%</td><td>67.02%</td><td>63.04%</td><td>1.38%</td><td>77.78%</td><td>78.25%</td></tr>
<tr><td>Llama-3.2-3B-Instruct (SFT400+PPO)</td><td>41.62%</td><td>68.10%</td><td>69.88%</td><td>52.98%</td><td>3.00%</td><td>94.12%</td><td>56.29%</td></tr>
<tr><td>Llama-3.2-3B-Instruct (SFT400+GRPO)</td><td>42.54%</td><td>65.15%</td><td>68.98%</td><td>59.40%</td><td>0.88%</td><td>72.22%</td><td>65.80%</td></tr>
<tr><td>Llama-3.2-3B-Instruct (PPO Cold Start)</td><td>42.98%</td><td>84.00%</td><td>72.00%</td><td>52.80%</td><td>2.88%</td><td>100.00%</td><td>31.94%</td></tr>
<tr><td>Llama-3.2-3B-Instruct (Ours, GRPO Cold Start)</td><td>44.10%</td><td>74.38%</td><td>75.18%</td><td>56.86%</td><td>1.37%</td><td>94.44%</td><td>62.23%</td></tr></table>
", + "image_path": "a3a33f9935b2d034f4785ca3ce0edc9bfb4b0cce014fa130937d9a32138659a6.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 71, + 329, + 301, + 550 + ], + "blocks": [ + { + "bbox": [ + 188, + 306, + 404, + 318 + ], + "lines": [ + { + "bbox": [ + 188, + 306, + 404, + 318 + ], + "spans": [ + { + "bbox": [ + 188, + 306, + 404, + 318 + ], + "type": "text", + "content": "Table 1: BFCL V3 Benchmark Results (Main Result)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 71, + 329, + 301, + 550 + ], + "lines": [ + { + "bbox": [ + 71, + 329, + 301, + 550 + ], + "spans": [ + { + "bbox": [ + 71, + 329, + 301, + 550 + ], + "type": "table", + "html": "
<table><tr><td>Model</td><td>Overall Acc</td><td>Level 1</td><td>Level 2</td><td>Level 3</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (Raw)</td><td>30.65%</td><td>28.32%</td><td>35.82%</td><td>35.11%</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (SFT400)</td><td>53.60%</td><td>57.14%</td><td>50.75%</td><td>44.27%</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (SFT4k)</td><td>47.07%</td><td>52.88%</td><td>52.24%</td><td>26.72%</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (SFT400+PPO)</td><td>57.12%</td><td>60.9%</td><td>50.75%</td><td>48.85%</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (SFT400+GRPO)</td><td>61.31%</td><td>64.16%</td><td>58.21%</td><td>54.20%</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (PPO Cold Start)</td><td>40.54%</td><td>44.61%</td><td>31.34%</td><td>32.82%</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (Ours, GRPO Cold Start)</td><td>63.15%</td><td>70.68%</td><td>61.19%</td><td>41.22%</td></tr>
<tr><td>Qwen2.5-3B-Instruct (Raw)</td><td>51.59%</td><td>59.65%</td><td>32.84%</td><td>36.64%</td></tr>
<tr><td>Qwen2.5-3B-Instruct (SFT400)</td><td>52.76%</td><td>59.65%</td><td>50.75%</td><td>32.82%</td></tr>
<tr><td>Qwen2.5-3B-Instruct (SFT4k)</td><td>50.92%</td><td>55.64%</td><td>43.28%</td><td>40.46%</td></tr>
<tr><td>Qwen2.5-3B-Instruct (SFT400+PPO)</td><td>65.16%</td><td>67.92%</td><td>55.22%</td><td>61.83%</td></tr>
<tr><td>Qwen2.5-3B-Instruct (SFT400+GRPO)</td><td>62.48%</td><td>68.67%</td><td>58.21%</td><td>45.80%</td></tr>
<tr><td>Qwen2.5-3B-Instruct (PPO Cold Start)</td><td>57.62%</td><td>64.66%</td><td>59.70%</td><td>35.11%</td></tr>
<tr><td>Qwen2.5-3B-Instruct (Ours, GRPO Cold Start)</td><td>67.00%</td><td>73.43%</td><td>67.16%</td><td>47.33%</td></tr>
<tr><td>Qwen2.5-7B-Instruct (Raw)</td><td>62.48%</td><td>70.68%</td><td>49.25%</td><td>44.27%</td></tr>
<tr><td>Qwen2.5-7B-Instruct (SFT400)</td><td>50.59%</td><td>55.89%</td><td>50.75%</td><td>34.35%</td></tr>
<tr><td>Qwen2.5-7B-Instruct (SFT4k)</td><td>47.07%</td><td>51.13%</td><td>34.33%</td><td>41.22%</td></tr>
<tr><td>Qwen2.5-7B-Instruct (SFT400+PPO)</td><td>63.15%</td><td>72.43%</td><td>58.21%</td><td>37.40%</td></tr>
<tr><td>Qwen2.5-7B-Instruct (SFT400+GRPO)</td><td>54.10%</td><td>61.40%</td><td>52.24%</td><td>32.82%</td></tr>
<tr><td>Qwen2.5-7B-Instruct (PPO Cold Start)</td><td>61.64%</td><td>68.67%</td><td>44.78%</td><td>48.85%</td></tr>
<tr><td>Qwen2.5-7B-Instruct (Ours, GRPO Cold Start)</td><td>64.66%</td><td>73.93%</td><td>61.19%</td><td>38.17%</td></tr>
<tr><td>Llama-3.2-3B-Instruct (Raw)</td><td>40.54%</td><td>44.86%</td><td>29.85%</td><td>32.82%</td></tr>
<tr><td>Llama-3.2-3B-Instruct (SFT400)</td><td>52.76%</td><td>60.65%</td><td>35.82%</td><td>37.40%</td></tr>
<tr><td>Llama-3.2-3B-Instruct (SFT4k)</td><td>43.89%</td><td>53.88%</td><td>29.85%</td><td>20.61%</td></tr>
<tr><td>Llama-3.2-3B-Instruct (SFT400+PPO)</td><td>57.79%</td><td>63.16%</td><td>47.76%</td><td>46.56%</td></tr>
<tr><td>Llama-3.2-3B-Instruct (SFT400+GRPO)</td><td>56.78%</td><td>63.60%</td><td>41.79%</td><td>43.51%</td></tr>
<tr><td>Llama-3.2-3B-Instruct (PPO Cold Start)</td><td>55.78%</td><td>60.65%</td><td>41.79%</td><td>48.09%</td></tr>
<tr><td>Llama-3.2-3B-Instruct (Ours, GRPO Cold Start)</td><td>59.13%</td><td>65.66%</td><td>52.24%</td><td>42.75%</td></tr></table>
", + "image_path": "e4d175360576c3fb81a40f514fde4894f0c62e6bb1915ba240566fc0a964c1a5.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 318, + 329, + 523, + 551 + ], + "blocks": [ + { + "bbox": [ + 91, + 560, + 280, + 571 + ], + "lines": [ + { + "bbox": [ + 91, + 560, + 280, + 571 + ], + "spans": [ + { + "bbox": [ + 91, + 560, + 280, + 571 + ], + "type": "text", + "content": "Table 2: API-Bank Test Results (Main Result)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 329, + 523, + 551 + ], + "lines": [ + { + "bbox": [ + 318, + 329, + 523, + 551 + ], + "spans": [ + { + "bbox": [ + 318, + 329, + 523, + 551 + ], + "type": "table", + "html": "
<table><tr><td>Model</td><td>Accuracy</td><td>Avg Num Tool Call</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (Raw)</td><td>20.8%</td><td>0.61</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (SFT400)</td><td>24.8%</td><td>0.78</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (SFT4k)</td><td>23.2%</td><td>1.25</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (SFT400+PPO)</td><td>36.8%</td><td>1.06</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (SFT400+GRPO)</td><td>38.4%</td><td>0.96</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (PPO Cold Start)</td><td>23.2%</td><td>2.38</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (Ours, GRPO Cold Start)</td><td>44.0%</td><td>1.19</td></tr>
<tr><td>Qwen2.5-3B-Instruct (Raw)</td><td>52.0%</td><td>1.77</td></tr>
<tr><td>Qwen2.5-3B-Instruct (SFT400)</td><td>54.4%</td><td>0.86</td></tr>
<tr><td>Qwen2.5-3B-Instruct (SFT4k)</td><td>49.6%</td><td>0.92</td></tr>
<tr><td>Qwen2.5-3B-Instruct (SFT400+PPO)</td><td>43.2%</td><td>1.04</td></tr>
<tr><td>Qwen2.5-3B-Instruct (SFT400+GRPO)</td><td>56.8%</td><td>0.99</td></tr>
<tr><td>Qwen2.5-3B-Instruct (PPO Cold Start)</td><td>40.0%</td><td>1.14</td></tr>
<tr><td>Qwen2.5-3B-Instruct (Ours, GRPO Cold Start)</td><td>60.0%</td><td>1.32</td></tr>
<tr><td>Qwen2.5-7B-Instruct (Raw)</td><td>69.6%</td><td>1.42</td></tr>
<tr><td>Qwen2.5-7B-Instruct (SFT400)</td><td>28.8%</td><td>3.71</td></tr>
<tr><td>Qwen2.5-7B-Instruct (SFT4k)</td><td>30.4%</td><td>1.06</td></tr>
<tr><td>Qwen2.5-7B-Instruct (SFT400+PPO)</td><td>45.6%</td><td>3.54</td></tr>
<tr><td>Qwen2.5-7B-Instruct (SFT400+GRPO)</td><td>29.6%</td><td>3.70</td></tr>
<tr><td>Qwen2.5-7B-Instruct (PPO Cold Start)</td><td>48.0%</td><td>1.25</td></tr>
<tr><td>Qwen2.5-7B-Instruct (Ours, GRPO Cold Start)</td><td>72.0%</td><td>1.63</td></tr>
<tr><td>Llama-3.2-3B-Instruct (Raw)</td><td>34.4%</td><td>1.25</td></tr>
<tr><td>Llama-3.2-3B-Instruct (SFT400)</td><td>44.0%</td><td>0.98</td></tr>
<tr><td>Llama-3.2-3B-Instruct (SFT4k)</td><td>48.8%</td><td>0.98</td></tr>
<tr><td>Llama-3.2-3B-Instruct (SFT400+PPO)</td><td>39.2%</td><td>1.33</td></tr>
<tr><td>Llama-3.2-3B-Instruct (SFT400+GRPO)</td><td>45.6%</td><td>1.00</td></tr>
<tr><td>Llama-3.2-3B-Instruct (PPO Cold Start)</td><td>29.6%</td><td>1.42</td></tr>
<tr><td>Llama-3.2-3B-Instruct (Ours, GRPO Cold Start)</td><td>52.0%</td><td>0.89</td></tr></table>
", + "image_path": "2151efeafc6930f4d8fc699da769a1f209b9d453a3e8ec9bf3cdf28ea194ce90.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 324, + 560, + 517, + 571 + ], + "lines": [ + { + "bbox": [ + 324, + 560, + 517, + 571 + ], + "spans": [ + { + "bbox": [ + 324, + 560, + 517, + 571 + ], + "type": "text", + "content": "Table 3: Bamboogle Test Results (Main Result)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 593, + 291, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 593, + 291, + 660 + ], + "spans": [ + { + "bbox": [ + 67, + 593, + 291, + 660 + ], + "type": "text", + "content": "when trained from scratch. These findings highlight that while PPO can benefit from our reward design, its impact is more limited compared to the more robust and consistent improvements observed with GRPO." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 680, + 292, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 680, + 292, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 680, + 292, + 775 + ], + "type": "text", + "content": "Generalization Studies. We evaluate the generalization ability of our trained model in two challenging settings: unfamiliar scenarios and novel task goals (both from BFCL benchmark subset). Specifically, we test the model's performance in tool usage within unseen programming languages and its ability to detect irrelevant tools, neither of" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 307, + 606, + 411, + 683 + ], + "blocks": [ + { + "bbox": [ + 307, + 606, + 411, + 683 + ], + "lines": [ + { + "bbox": [ + 307, + 606, + 411, + 683 + ], + "spans": [ + { + "bbox": [ + 307, + 606, + 411, + 683 + ], + "type": "image", + "image_path": "edc4a0db668429fb699878570feac579cd2731eaea04e93c2eba09dd09e6b856.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 315, + 689, + 404, + 699 + ], + "lines": [ + { + "bbox": [ + 315, + 689, + 404, + 699 + ], + "spans": [ + { + "bbox": [ + 315, + 689, + 404, + 699 + ], + "type": "text", + "content": "(a) Unfamiliar Scenario" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 417, + 607, + 521, + 683 + ], + "blocks": [ + { + "bbox": [ + 417, + 607, + 521, + 683 + ], + "lines": [ + { + "bbox": [ + 417, + 607, + 521, + 683 + ], + "spans": [ + { + "bbox": [ + 417, + 607, + 521, + 683 + ], + "type": "image", + "image_path": "9da49ecd03180b3351e89de045a7633df7596f714deea3d3adb864ec3d0ad88d.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 432, + 688, + 506, + 699 + ], + "lines": [ + { + "bbox": [ + 432, + 688, + 506, + 699 + ], + "spans": [ + { + "bbox": [ + 432, + 688, + 506, + 699 + ], + "type": "text", + "content": "(b) Unfamiliar Goal" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 302, + 713, + 525, + 761 + ], + "lines": [ + { + "bbox": [ + 302, + 713, + 525, + 761 + ], + "spans": [ + { + "bbox": [ + 302, + 713, + 525, + 761 + ], + "type": "text", + "content": "Figure 7: Qwen2.5-3B-Instruct's performance across unfamiliar programming language scenarios (left) and novel relevance detection task goals (right), evaluated under different training settings." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 76, + 73, + 266, + 87 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 73, + 266, + 87 + ], + "spans": [ + { + "bbox": [ + 76, + 73, + 266, + 87 + ], + "type": "text", + "content": "User: I would like to buy a movie ticket in San Jose at 11 o'clock in the night. Tools Available:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 77, + 88, + 267, + 129 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 77, + 88, + 267, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 88, + 267, + 95 + ], + "spans": [ + { + "bbox": [ + 77, + 88, + 267, + 95 + ], + "type": "text", + "content": "1. Movies_1_BuyMovieTickets(movie_name, number_of_tickets, ...)" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 78, + 95, + 217, + 101 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 95, + 217, + 101 + ], + "spans": [ + { + "bbox": [ + 78, + 95, + 217, + 101 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 78, + 95, + 217, + 101 + ], + "type": "text", + "content": " Buy tickets for a movie at a specific time and location." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 78, + 101, + 189, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 101, + 189, + 107 + ], + "spans": [ + { + "bbox": [ + 78, + 101, + 189, + 107 + ], + "type": "text", + "content": "2. Movies_1_FindMovies(location, ...)" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 78, + 108, + 193, + 115 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 108, + 193, + 115 + ], + "spans": [ + { + "bbox": [ + 78, + 108, + 193, + 115 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 78, + 108, + 193, + 115 + ], + "type": "text", + "content": " Search movies by location, genre, or format." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 78, + 116, + 243, + 122 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 116, + 243, + 122 + ], + "spans": [ + { + "bbox": [ + 78, + 116, + 243, + 122 + ], + "type": "text", + "content": "3. Movies_1_GetTimesForMovie(movie_name, location, ...)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 78, + 123, + 227, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 123, + 227, + 129 + ], + "spans": [ + { + "bbox": [ + 78, + 123, + 227, + 129 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 78, + 123, + 227, + 129 + ], + "type": "text", + "content": " Get showtimes for a movie on a specific date and location." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 76, + 134, + 274, + 141 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 134, + 274, + 141 + ], + "spans": [ + { + "bbox": [ + 76, + 134, + 274, + 141 + ], + "type": "text", + "content": " The user wants to buy a movie ticket in San Jose at 11 PM. The available" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 77, + 142, + 289, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 142, + 289, + 177 + ], + "spans": [ + { + "bbox": [ + 77, + 142, + 289, + 177 + ], + "type": "text", + "content": "tool, Movies_1_BuyMovieTickets, allows us to purchase movie tickets with the required details such as movie name, number of tickets, show date, location, show time, and show type. We need to find the movie name and the show date. However, the user did not specify the movie name or the show date. We can infer the show date from the current date, but we need to know the movie name to proceed with purchase. " + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 77, + 184, + 282, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 184, + 282, + 190 + ], + "spans": [ + { + "bbox": [ + 77, + 184, + 282, + 190 + ], + "type": "text", + "content": " To assist in purchasing a movie ticket, I need the name of the movie and" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 77, + 191, + 285, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 191, + 285, + 198 + ], + "spans": [ + { + "bbox": [ + 77, + 191, + 285, + 198 + ], + "type": "text", + "content": "the specific date of the show. Could you please provide this information? " + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 77, + 210, + 263, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 210, + 263, + 216 + ], + "spans": [ + { + "bbox": [ + 77, + 210, + 263, + 216 + ], + "type": "text", + "content": "User: What is the slope of the line which is perpendicular to the line with the" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 77, + 217, + 129, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 217, + 129, + 224 + ], + "spans": [ + { + "bbox": [ + 77, + 217, + 129, + 224 + ], + "type": "text", + "content": "equation " + }, + { + "bbox": [ + 77, + 217, + 129, + 224 + ], + "type": "inline_equation", + "content": "y = 3x + 2" + }, + { + "bbox": [ + 77, + 217, + 129, + 224 + ], + "type": "text", + "content": "?" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 77, + 224, + 117, + 231 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 224, + 117, + 231 + ], + "spans": [ + { + "bbox": [ + 77, + 224, + 117, + 231 + ], + "type": "text", + "content": "Tools Available:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 77, + 232, + 261, + 253 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 77, + 232, + 261, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 232, + 261, + 238 + ], + "spans": [ + { + "bbox": [ + 77, + 232, + 261, + 238 + ], + "type": "text", + "content": "1. find_critical_points(function, variable, range=[0.0, 3.4])" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 77, + 239, + 254, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 239, + 254, + 253 + ], + "spans": [ + { + "bbox": [ + 77, + 239, + 254, + 253 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 77, + 239, + 254, + 253 + ], + "type": "text", + "content": " Find critical points of a function with respect to a variable, optionally within a range."
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 76, + 258, + 290, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 258, + 290, + 300 + ], + "spans": [ + { + "bbox": [ + 76, + 258, + 290, + 300 + ], + "type": "text", + "content": " The problem is asking for the slope of a line that is perpendicular to the line with the equation " + }, + { + "bbox": [ + 76, + 258, + 290, + 300 + ], + "type": "inline_equation", + "content": "y = 3x + 2" + }, + { + "bbox": [ + 76, + 258, + 290, + 300 + ], + "type": "text", + "content": ". The slope of the given line is 3. The slope of a line that is perpendicular to another line is the negative reciprocal of the original line's slope. So, the slope of the line perpendicular to " + }, + { + "bbox": [ + 76, + 258, + 290, + 300 + ], + "type": "inline_equation", + "content": "y = 3x + 2" + }, + { + "bbox": [ + 76, + 258, + 290, + 300 + ], + "type": "text", + "content": " would be -1/3. The given tool find_critical_points is not relevant to finding the slope of a line and cannot be used to solve this problem." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 77, + 306, + 254, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 306, + 254, + 313 + ], + "spans": [ + { + "bbox": [ + 77, + 306, + 254, + 313 + ], + "type": "text", + "content": " The slope of the line that is perpendicular to the line with the" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 77, + 313, + 175, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 313, + 175, + 321 + ], + "spans": [ + { + "bbox": [ + 77, + 313, + 175, + 321 + ], + "type": "text", + "content": "equation " + }, + { + "bbox": [ + 77, + 313, + 175, + 321 + ], + "type": "inline_equation", + "content": "y = 3x + 2" + }, + { + "bbox": [ + 77, + 313, + 175, + 321 + ], + "type": "text", + "content": " is -1/3. " + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 67, + 333, + 289, + 358 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 333, + 289, + 358 + ], + "spans": [ + { + "bbox": [ + 67, + 333, + 289, + 358 + ], + "type": "text", + "content": "Table 4: Cases of Qwen2.5-3B-Instruct (GRPO Cold Start) generalizing to irrelevant tool detection goals." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 67, + 382, + 290, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 382, + 290, + 544 + ], + "spans": [ + { + "bbox": [ + 67, + 382, + 290, + 544 + ], + "type": "text", + "content": "which were explicitly included during RL training or in the dataset. As shown in Figure 7, Qwen2.5-3B-Instruct, when trained from scratch with our GRPO-based reward design, consistently achieves highest performance. Additionally, Section 4.3 presents two qualitative examples where the model proactively rejects inappropriate tool use—first by clarifying ambiguous intent, and second by opting to answer directly without tools. These behaviors reflect emergent proactivity and metacognition, enhancing efficiency, reducing hallucinations, and signaling foundational agentic intelligence." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 67, + 556, + 290, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 556, + 290, + 691 + ], + "spans": [ + { + "bbox": [ + 67, + 556, + 290, + 691 + ], + "type": "text", + "content": "Free-form Inference Effectiveness. 
While our model is trained with a focus on tool call format and correctness, we further evaluate its ability to handle free-form tool use in a QA setting. Unlike the structured tool selection and application tasks, QA setting: (1) imposes no constraints on tool call parameters, and (2) evaluates only the final answer, making it a \"goal-oriented\" rather than a \"process-oriented\" task. This naturally introduces a multi-step interaction scenario." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 67, + 693, + 290, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 693, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 693, + 290, + 775 + ], + "type": "text", + "content": "Specifically, we use Bamboogle, a multi-hop QA dataset, to assess this capability. The model is equipped with a web search tool, and we report both the answer accuracy and the number of tool calls for all baselines and our approach. As shown in Table 3, our reward design achieves the highest" + } + ] + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 308, + 75, + 411, + 150 + ], + "blocks": [ + { + "bbox": [ + 308, + 75, + 411, + 150 + ], + "lines": [ + { + "bbox": [ + 308, + 75, + 411, + 150 + ], + "spans": [ + { + "bbox": [ + 308, + 75, + 411, + 150 + ], + "type": "image", + "image_path": "3f289fa8a90fbf8fbf687744f30d2bb325c3c09d051f0b7147fec1d6f9461d2b.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 320, + 157, + 398, + 168 + ], + "lines": [ + { + "bbox": [ + 320, + 157, + 398, + 168 + ], + "spans": [ + { + "bbox": [ + 320, + 157, + 398, + 168 + ], + "type": "text", + "content": "(a) Response Length" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 417, + 74, + 521, + 150 + ], + "blocks": [ + { + "bbox": [ + 417, + 74, + 521, + 150 + ], + "lines": [ + { + "bbox": [ + 417, + 74, + 521, + 150 + ], + "spans": [ + { + "bbox": [ + 417, + 74, + 521, + 150 + ], + "type": "image", + "image_path": "aa1c9884a4981a22763d4b52e5c17a6c9e5dc5abe1c1948ebb70001bf6540d0a.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 434, + 157, + 505, + 167 + ], + "lines": [ + { + "bbox": [ + 434, + 157, + 505, + 167 + ], + "spans": [ + { + "bbox": [ + 434, + 157, + 505, + 167 + ], + "type": "text", + "content": "(b) Length Reward" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 302, + 180, + 525, + 205 + ], + "lines": [ + { + "bbox": [ + 302, + 180, + 525, + 205 + ], + "spans": [ + { + "bbox": [ + 302, + 180, + 525, + 205 + ], + "type": "text", + "content": "Figure 8: Response length (left) and its reward (right) trends across training steps for different models." + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_caption" + } + ], + "index": 27 + }, + { + "bbox": [ + 302, + 226, + 525, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 226, + 525, + 320 + ], + "spans": [ + { + "bbox": [ + 302, + 226, + 525, + 320 + ], + "type": "text", + "content": "performance, despite this setting not being explicitly seen during training. Notably, our cold start GRPO model surpasses others in accuracy without relying on excessive number of tool calls. This suggests that the model can flexibly invoke tools when needed, effectively leverage feedback, wisely and efficiently navigating toward the correct answer." 
+ } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 302, + 330, + 368, + 343 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 330, + 368, + 343 + ], + "spans": [ + { + "bbox": [ + 302, + 330, + 368, + 343 + ], + "type": "text", + "content": "5 Analysis" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 302, + 351, + 525, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 351, + 525, + 418 + ], + "spans": [ + { + "bbox": [ + 302, + 351, + 525, + 418 + ], + "type": "text", + "content": "In this section, we conduct a series of ablation studies to identify the most effective reward design for tool calling. We explore various factors including reward type, scale, granularity, and temporal dynamics." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 302, + 428, + 446, + 441 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 428, + 446, + 441 + ], + "spans": [ + { + "bbox": [ + 302, + 428, + 446, + 441 + ], + "type": "text", + "content": "5.1 Effect of Length Reward" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 302, + 445, + 525, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 445, + 525, + 526 + ], + "spans": [ + { + "bbox": [ + 302, + 445, + 525, + 526 + ], + "type": "text", + "content": "We first examine the role of a length-based reward. Prior work has demonstrated that the R1-like models can promote deeper reasoning, often reflected in longer thinking traces. To encourage this behavior, we introduce a reward term proportional to the length of the field:" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 352, + 534, + 473, + 564 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 352, + 534, + 473, + 564 + ], + "spans": [ + { + "bbox": [ + 352, + 534, + 473, + 564 + ], + "type": "interline_equation", + "content": "\\mathcal {R} _ {\\text {l e n g t h}} = \\min \\left(\\frac {L _ {\\text {t h i n k}}}{L _ {\\text {t a r g e t}}}, 1\\right)", + "image_path": "a34ee3ccb5b1cec76a45553952cfaf1d5d9d51339583c1ccec92529b9ae958ab.jpg" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 302, + 571, + 525, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 571, + 525, + 693 + ], + "spans": [ + { + "bbox": [ + 302, + 571, + 525, + 693 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 302, + 571, + 525, + 693 + ], + "type": "inline_equation", + "content": "L_{\\text{think}}" + }, + { + "bbox": [ + 302, + 571, + 525, + 693 + ], + "type": "text", + "content": " denotes the length of the thinking segment in model's output, and " + }, + { + "bbox": [ + 302, + 571, + 525, + 693 + ], + "type": "inline_equation", + "content": "L_{\\text{target}}" + }, + { + "bbox": [ + 302, + 571, + 525, + 693 + ], + "type": "text", + "content": " denotes the target output length, which we empirically set to 512. We found that the raw model rarely generates responses longer than half this length, making 512 a reasonable and effective target for encouraging longer outputs. This length-based component is added to the overall reward, which now consists of format, correctness, and reasoning length." 
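For reference, the fixed length term above is a one-liner in code. Below is a minimal sketch in Python, assuming the thinking length is measured in tokens and using the target of 512 described in the text; the function and variable names are ours, not the paper's implementation.

```python
L_TARGET = 512  # empirically chosen target thinking length, per the text

def length_reward(l_think: int, l_target: int = L_TARGET) -> float:
    """R_length = min(L_think / L_target, 1): grows linearly with the
    length of the thinking segment and saturates once the target is hit."""
    return min(l_think / l_target, 1.0)
```

Because the term saturates at 1, it only penalizes traces that stop short of the target; it never rewards pushing past 512 tokens.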
+ } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 302, + 694, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 694, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 694, + 525, + 775 + ], + "type": "text", + "content": "As shown in Figure 8, both response length and the length reward generally increase throughout training, particularly for the Qwen model series. This indicates that the length reward effectively encourages longer reasoning. However, the downstream results in Table 5 reveal that adding a length" + } + ] + } + ], + "index": 37 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 71, + 68, + 524, + 160 + ], + "blocks": [ + { + "bbox": [ + 71, + 68, + 524, + 160 + ], + "lines": [ + { + "bbox": [ + 71, + 68, + 524, + 160 + ], + "spans": [ + { + "bbox": [ + 71, + 68, + 524, + 160 + ], + "type": "table", + "html": "
<table><tr><td>Model</td><td>Overall Acc</td><td>Non-Live AST Acc</td><td>Non-Live Exec Acc</td><td>Live Acc</td><td>Multi Turn Acc</td><td>Relevance Detection</td><td>Irrelevance Detection</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (Original)</td><td>46.20%</td><td>77.96%</td><td>76.98%</td><td>60.73%</td><td>2.25%</td><td>100.00%</td><td>56.44%</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (w/ Length Reward)</td><td>33.23%</td><td>70.58%</td><td>71.36%</td><td>35.63%</td><td>0.50%</td><td>94.44%</td><td>4.52%</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (Dynamic)</td><td>28.51%</td><td>53.23%</td><td>48.23%</td><td>38.07%</td><td>0.00%</td><td>55.56%</td><td>25.08%</td></tr>
<tr><td>Qwen2.5-3B-Instruct (Original)</td><td>52.98%</td><td>81.58%</td><td>79.43%</td><td>73.78%</td><td>3.75%</td><td>88.24%</td><td>84.85%</td></tr>
<tr><td>Qwen2.5-3B-Instruct (w/ Length reward)</td><td>48.89%</td><td>77.83%</td><td>78.61%</td><td>63.56%</td><td>4.50%</td><td>88.24%</td><td>71.22%</td></tr>
<tr><td>Qwen2.5-3B-Instruct (Dynamic)</td><td>48.24%</td><td>77.60%</td><td>79.11%</td><td>63.22%</td><td>3.00%</td><td>88.89%</td><td>68.53%</td></tr>
<tr><td>Llama-3.2-3B-Instruct (Original)</td><td>44.10%</td><td>74.38%</td><td>75.18%</td><td>56.86%</td><td>1.37%</td><td>94.44%</td><td>62.23%</td></tr>
<tr><td>Llama-3.2-3B-Instruct (w/ Length reward)</td><td>44.98%</td><td>78.02%</td><td>77.54%</td><td>56.55%</td><td>1.25%</td><td>100.00%</td><td>63.76%</td></tr>
<tr><td>Llama-3.2-3B-Instruct (Dynamic)</td><td>43.15%</td><td>75.50%</td><td>71.64%</td><td>56.06%</td><td>1.00%</td><td>100.00%</td><td>57.82%</td></tr></table>
", + "image_path": "cd6dcd5e59416e9771d2031ef78c027d93b9d9dfba2d8b2e158e459218092ead.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 198, + 167, + 394, + 179 + ], + "lines": [ + { + "bbox": [ + 198, + 167, + 394, + 179 + ], + "spans": [ + { + "bbox": [ + 198, + 167, + 394, + 179 + ], + "type": "text", + "content": "Table 5: BFCL V3 Benchmark Results (Length)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 72, + 204, + 176, + 282 + ], + "blocks": [ + { + "bbox": [ + 72, + 204, + 176, + 282 + ], + "lines": [ + { + "bbox": [ + 72, + 204, + 176, + 282 + ], + "spans": [ + { + "bbox": [ + 72, + 204, + 176, + 282 + ], + "type": "image", + "image_path": "e489e46a097675008b0b2b8264d88eeba35e4558c034f26f0ab7174772e2d697.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 85, + 286, + 163, + 298 + ], + "lines": [ + { + "bbox": [ + 85, + 286, + 163, + 298 + ], + "spans": [ + { + "bbox": [ + 85, + 286, + 163, + 298 + ], + "type": "text", + "content": "(a) Response Length" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 182, + 204, + 286, + 280 + ], + "blocks": [ + { + "bbox": [ + 182, + 204, + 286, + 280 + ], + "lines": [ + { + "bbox": [ + 182, + 204, + 286, + 280 + ], + "spans": [ + { + "bbox": [ + 182, + 204, + 286, + 280 + ], + "type": "image", + "image_path": "d4f5c580a080fa660a72ece01f1f516aab789459b4ebb589abcec9b899671c97.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 198, + 286, + 270, + 298 + ], + "lines": [ + { + "bbox": [ + 198, + 286, + 270, + 298 + ], + "spans": [ + { + "bbox": [ + 198, + 286, + 270, + 298 + ], + "type": "text", + "content": "(b) Length Reward" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 67, + 311, + 290, + 348 + ], + "lines": [ + { + "bbox": [ + 67, + 311, + 290, + 348 + ], + "spans": [ + { + "bbox": [ + 67, + 311, + 290, + 348 + ], + "type": "text", + "content": "Figure 9: Response length (left) and its reward (right) trends across training steps within the dynamic length reward training setting." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 368, + 290, + 476 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 368, + 290, + 476 + ], + "spans": [ + { + "bbox": [ + 67, + 368, + 290, + 476 + ], + "type": "text", + "content": "reward does not consistently improve task performance, and in smaller-scale models, it can even cause substantial degradation. These observations suggest that while extended reasoning may appear desirable, it is not always beneficial for tool use tasks. In fact, excessive length may introduce unnecessary complexity, leading to overthinking and reduced effectiveness." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 484, + 290, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 484, + 290, + 539 + ], + "spans": [ + { + "bbox": [ + 67, + 484, + 290, + 539 + ], + "type": "text", + "content": "Dynamic Length Reward. Since fixed-length rewards showed minimal impact and converged quickly, we explored a dynamic length reward that adapts over training steps. 
Specifically, we define:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 93, + 547, + 263, + 577 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 547, + 263, + 577 + ], + "spans": [ + { + "bbox": [ + 93, + 547, + 263, + 577 + ], + "type": "interline_equation", + "content": "\\mathcal {R} _ {\\mathrm {d y n a m i c}} = \\min \\left(\\frac {L _ {\\mathrm {t h i n k}}}{L _ {\\mathrm {t a r g e t}} \\cdot (1 + p)}, 1\\right)", + "image_path": "ed928223fecc4f4255ed002c6cfe7a99f24f35477bb84a44e7049c3547618c4e.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 585, + 289, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 585, + 289, + 652 + ], + "spans": [ + { + "bbox": [ + 67, + 585, + 289, + 652 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 585, + 289, + 652 + ], + "type": "inline_equation", + "content": "S" + }, + { + "bbox": [ + 67, + 585, + 289, + 652 + ], + "type": "text", + "content": " denotes the training steps and " + }, + { + "bbox": [ + 67, + 585, + 289, + 652 + ], + "type": "inline_equation", + "content": "p = \\frac{S_{\\mathrm{current}}}{S_{\\mathrm{total}}} \\in [0,1]" + }, + { + "bbox": [ + 67, + 585, + 289, + 652 + ], + "type": "text", + "content": " represents the normalized training progress. This formulation gradually increases the target thinking length over time, aligning with model maturity." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 653, + 290, + 747 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 653, + 290, + 747 + ], + "spans": [ + { + "bbox": [ + 67, + 653, + 290, + 747 + ], + "type": "text", + "content": "As shown in fig. 9, this approach yields a steadier growth in thinking length, particularly for the Llama model. However, the performance results in Table 5 reveal that even scheduled rewards fail to improve performance. This further supports our hypothesis that extended reasoning may not benefit this task and can even have adverse effects." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 308, + 204, + 516, + 286 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 308, + 204, + 516, + 286 + ], + "spans": [ + { + "bbox": [ + 308, + 204, + 516, + 286 + ], + "type": "text", + "content": "Takeaway 1: While length rewards encourage longer reasoning traces, they do not consistently improve task performance and may even harm it in smaller models, highlighting that longer reasoning is not inherently better for tool use tasks." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 313, + 437, + 326 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 313, + 437, + 326 + ], + "spans": [ + { + "bbox": [ + 302, + 313, + 437, + 326 + ], + "type": "text", + "content": "5.2 Effect of Reward Scale" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 331, + 525, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 331, + 525, + 465 + ], + "spans": [ + { + "bbox": [ + 302, + 331, + 525, + 465 + ], + "type": "text", + "content": "Next, we investigate the effect of reward scaling, specifically the relative weighting between correctness and format rewards. Prior work in R1-style RL commonly assigns a higher weight to correctness reward than to format reward (Xie et al., 2025; Jin et al., 2025), emphasizing the importance of learning correct answer over superficial adherence to format. 
This strategy helps prevent reward hacking, where a model might exploit formatting heuristics without learning task semantics." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 467, + 525, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 467, + 525, + 547 + ], + "spans": [ + { + "bbox": [ + 302, + 467, + 525, + 547 + ], + "type": "text", + "content": "To test the importance of this design choice, we conduct an ablation where we equalize the maximum correctness and format rewards by setting the former's range to " + }, + { + "bbox": [ + 302, + 467, + 525, + 547 + ], + "type": "inline_equation", + "content": "[-1, 1]" + }, + { + "bbox": [ + 302, + 467, + 525, + 547 + ], + "type": "text", + "content": ", matching that of the format reward. This adjustment only affects the final normalization step of the correctness reward:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 337, + 555, + 490, + 584 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 337, + 555, + 490, + 584 + ], + "spans": [ + { + "bbox": [ + 337, + 555, + 490, + 584 + ], + "type": "interline_equation", + "content": "\\mathcal {R} _ {\\mathrm {c o r r e c t}} = 2 \\cdot \\frac {R _ {\\mathrm {m a x}}}{S _ {\\mathrm {m a x}}} - 1 \\in [ - 1, 1 ]", + "image_path": "6d37d94d2f04bd05e02881808c03d3acfb39b97d12b5a68e08f2261264aaa439.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 592, + 516, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 592, + 516, + 603 + ], + "spans": [ + { + "bbox": [ + 302, + 592, + 516, + 603 + ], + "type": "text", + "content": "where all variables are defined as in Section 3.3." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 302, + 605, + 525, + 740 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 605, + 525, + 740 + ], + "spans": [ + { + "bbox": [ + 302, + 605, + 525, + 740 + ], + "type": "text", + "content": "As shown in Table 6, this equal-scaling variant, denoted as \"Equal Max\", results in a slight drop in overall accuracy across most models, with the exception of Qwen2.5-3B, which maintains performance comparable to the original setting. These results underscore the importance of assigning greater weight to correctness reward: doing so helps steer the model toward mastering the core reasoning and tool use capabilities necessary for robust generalization." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 302, + 748, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 748, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 748, + 525, + 775 + ], + "type": "text", + "content": "Dynamic Reward Scaling. Building on the insight that correctness reward plays a more critical" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 71, + 69, + 524, + 188 + ], + "blocks": [ + { + "bbox": [ + 71, + 69, + 524, + 188 + ], + "lines": [ + { + "bbox": [ + 71, + 69, + 524, + 188 + ], + "spans": [ + { + "bbox": [ + 71, + 69, + 524, + 188 + ], + "type": "table", + "html": "
<table><tr><td>Model</td><td>Overall Acc</td><td>Non-Live AST Acc</td><td>Non-Live Exec Acc</td><td>Live Acc</td><td>Multi Turn Acc</td><td>Relevance Detection</td><td>Irrelevance Detection</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (Original)</td><td>46.20%</td><td>77.96%</td><td>76.98%</td><td>60.73%</td><td>2.25%</td><td>100.00%</td><td>56.44%</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (Equal max)</td><td>39.47%</td><td>78.56%</td><td>75.50%</td><td>45.45%</td><td>2.50%</td><td>100.00%</td><td>16.44%</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (Two stage)</td><td>38.85%</td><td>77.96%</td><td>76.23%</td><td>44.51%</td><td>2.25%</td><td>100.00%</td><td>10.61%</td></tr>
<tr><td>Qwen2.5-1.5B-Instruct (Dynamic)</td><td>45.71%</td><td>78.31%</td><td>75.73%</td><td>58.91%</td><td>2.50%</td><td>100.00%</td><td>57.20%</td></tr>
<tr><td>Qwen2.5-3B-Instruct (Original)</td><td>52.98%</td><td>81.58%</td><td>79.43%</td><td>73.78%</td><td>3.75%</td><td>88.24%</td><td>84.85%</td></tr>
<tr><td>Qwen2.5-3B-Instruct (Equal max)</td><td>51.76%</td><td>81.50%</td><td>79.50%</td><td>69.79%</td><td>4.25%</td><td>88.89%</td><td>78.07%</td></tr>
<tr><td>Qwen2.5-3B-Instruct (Two stage)</td><td>50.66%</td><td>80.62%</td><td>78.82%</td><td>67.93%</td><td>3.50%</td><td>88.89%</td><td>76.42%</td></tr>
<tr><td>Qwen2.5-3B-Instruct (Dynamic)</td><td>53.81%</td><td>81.44%</td><td>80.75%</td><td>75.43%</td><td>3.62%</td><td>77.78%</td><td>88.82%</td></tr>
<tr><td>Llama-3.2-3B-Instruct (Original)</td><td>44.10%</td><td>74.38%</td><td>75.18%</td><td>56.86%</td><td>1.37%</td><td>94.44%</td><td>62.23%</td></tr>
<tr><td>Llama-3.2-3B-Instruct (Equal max)</td><td>42.47%</td><td>67.77%</td><td>75.05%</td><td>55.75%</td><td>1.00%</td><td>88.89%</td><td>59.56%</td></tr>
<tr><td>Llama-3.2-3B-Instruct (Two stage)</td><td>41.33%</td><td>65.54%</td><td>72.70%</td><td>55.22%</td><td>0.75%</td><td>88.89%</td><td>57.59%</td></tr>
<tr><td>Llama-3.2-3B-Instruct (Dynamic)</td><td>46.85%</td><td>83.00%</td><td>72.77%</td><td>61.00%</td><td>3.38%</td><td>88.89%</td><td>59.37%</td></tr></table>
", + "image_path": "546a4bc485365caf449427b56cbaac3273cab3509f2a8e83c4907d6a20f983a1.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 201, + 195, + 391, + 206 + ], + "lines": [ + { + "bbox": [ + 201, + 195, + 391, + 206 + ], + "spans": [ + { + "bbox": [ + 201, + 195, + 391, + 206 + ], + "type": "text", + "content": "Table 6: BFCL V3 Benchmark Results (Scale)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 229, + 290, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 229, + 290, + 378 + ], + "spans": [ + { + "bbox": [ + 67, + 229, + 290, + 378 + ], + "type": "text", + "content": "role, we are further motivated by the intuition that different reward components may benefit from being emphasized at different stages of training. This leads us to explore dynamically adjusting reward scales in accordance with training progress. Specifically, we hypothesize that in early training, the model should prioritize learning the correct output format, which entails an easier objective, before gradually shifting focus to the more challenging goal of tool use correctness. To test this hypothesis, we design two dynamic reward scaling strategies:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 380, + 291, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 380, + 291, + 502 + ], + "spans": [ + { + "bbox": [ + 69, + 380, + 291, + 502 + ], + "type": "text", + "content": "- Two stage (Coarse) Setting: We divide training into two phases. In the first " + }, + { + "bbox": [ + 69, + 380, + 291, + 502 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 69, + 380, + 291, + 502 + ], + "type": "text", + "content": " training steps, we downscale the correctness reward to " + }, + { + "bbox": [ + 69, + 380, + 291, + 502 + ], + "type": "inline_equation", + "content": "\\frac{1}{3}" + }, + { + "bbox": [ + 69, + 380, + 291, + 502 + ], + "type": "text", + "content": " of its original scale while keeping the format reward at its original scale. After step " + }, + { + "bbox": [ + 69, + 380, + 291, + 502 + ], + "type": "inline_equation", + "content": "s" + }, + { + "bbox": [ + 69, + 380, + 291, + 502 + ], + "type": "text", + "content": ", we restore the correctness reward to its original scale and simultaneously reduce the format reward to range " + }, + { + "bbox": [ + 69, + 380, + 291, + 502 + ], + "type": "inline_equation", + "content": "[0, 0.5]" + }, + { + "bbox": [ + 69, + 380, + 291, + 502 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 69, + 380, + 291, + 502 + ], + "type": "inline_equation", + "content": "\\frac{1}{2}" + }, + { + "bbox": [ + 69, + 380, + 291, + 502 + ], + "type": "text", + "content": " of its original scale). Formally the reward scales are:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 94, + 509, + 271, + 544 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 509, + 271, + 544 + ], + "spans": [ + { + "bbox": [ + 94, + 509, + 271, + 544 + ], + "type": "interline_equation", + "content": "\\operatorname {S c a l e} _ {\\text {f o r m a t}} = \\left\\{ \\begin{array}{l l} [ 0, 1 ] & \\text {i f S _ {\\text {c u r r e n t}} < s} \\\\ [ 0, 0. 
5 ] & \\text {o t h e r w i s e} \\end{array} , \\right.", + "image_path": "09cf0aa386c623786b51eee799f30c5b2ff563161f0926be10a80c2546743135.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 96, + 554, + 270, + 589 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 554, + 270, + 589 + ], + "spans": [ + { + "bbox": [ + 96, + 554, + 270, + 589 + ], + "type": "interline_equation", + "content": "\\operatorname {S c a l e} _ {\\text {c o r r e c t}} = \\left\\{ \\begin{array}{l l} [ - 1, 1 ] & \\text {i f S _ {\\text {c u r r e n t}} < s} \\\\ [ - 3, 3 ] & \\text {o t h e r w i s e} \\end{array} \\right.", + "image_path": "fb17f93bfe1ec9a690daa9777f7c36b8a3fb1a902a12a04c14d86bb1e48abbc9.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 76, + 593, + 291, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 593, + 291, + 687 + ], + "spans": [ + { + "bbox": [ + 76, + 593, + 291, + 687 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 76, + 593, + 291, + 687 + ], + "type": "inline_equation", + "content": "S_{\\mathrm{current}}" + }, + { + "bbox": [ + 76, + 593, + 291, + 687 + ], + "type": "text", + "content": " denotes the current training step. In our experiments, we empirically set the switching point to " + }, + { + "bbox": [ + 76, + 593, + 291, + 687 + ], + "type": "inline_equation", + "content": "s = 30" + }, + { + "bbox": [ + 76, + 593, + 291, + 687 + ], + "type": "text", + "content": " steps, as we observed that the format reward typically experiences a significant increase within the first 30 steps. Therefore, it is more beneficial for later steps to shift focus toward optimizing correctness." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 693, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 693, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 69, + 693, + 291, + 775 + ], + "type": "text", + "content": "- Dynamic (Finegrained) Setting: We apply continuous interpolation between the two reward scales throughout training. Initially, both the format and correctness reward scales are set equally. 
Over time, the format reward scale linearly decays to its original value, while the correctness" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 307, + 231, + 411, + 309 + ], + "blocks": [ + { + "bbox": [ + 307, + 231, + 411, + 309 + ], + "lines": [ + { + "bbox": [ + 307, + 231, + 411, + 309 + ], + "spans": [ + { + "bbox": [ + 307, + 231, + 411, + 309 + ], + "type": "image", + "image_path": "c4a42ce99210f1c629662d6104f163b5fa2dbc2a5dfddaa2923d8c1986ba915a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 324, + 313, + 395, + 324 + ], + "lines": [ + { + "bbox": [ + 324, + 313, + 395, + 324 + ], + "spans": [ + { + "bbox": [ + 324, + 313, + 395, + 324 + ], + "type": "text", + "content": "(a) Format Reward" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 417, + 231, + 521, + 308 + ], + "blocks": [ + { + "bbox": [ + 417, + 231, + 521, + 308 + ], + "lines": [ + { + "bbox": [ + 417, + 231, + 521, + 308 + ], + "spans": [ + { + "bbox": [ + 417, + 231, + 521, + 308 + ], + "type": "image", + "image_path": "07889187074a1d5140bc409ad1bd803fcc49ff172e176507ffd4fb8fa1a0b325.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 426, + 313, + 513, + 324 + ], + "lines": [ + { + "bbox": [ + 426, + 313, + 513, + 324 + ], + "spans": [ + { + "bbox": [ + 426, + 313, + 513, + 324 + ], + "type": "text", + "content": "(b) Correctness Reward" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 302, + 338, + 525, + 374 + ], + "lines": [ + { + "bbox": [ + 302, + 338, + 525, + 374 + ], + "spans": [ + { + "bbox": [ + 302, + 338, + 525, + 374 + ], + "type": "text", + "content": "Figure 10: Format (left) and correctness (right) reward trends across training steps for Qwen2.5-3B-Instruct with different reward scale dynamics." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "bbox": [ + 312, + 395, + 525, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 395, + 525, + 449 + ], + "spans": [ + { + "bbox": [ + 312, + 395, + 525, + 449 + ], + "type": "text", + "content": "reward scale gradually increases to its original value, allowing the training to shift focus from format adherence to task correctness accordingly. 
Formally, the dynamic scaling is then defined as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 351, + 460, + 485, + 475 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 351, + 460, + 485, + 475 + ], + "spans": [ + { + "bbox": [ + 351, + 460, + 485, + 475 + ], + "type": "interline_equation", + "content": "\\operatorname {S c a l e} _ {\\text {f o r m a t}} = [ - 2 + p, 2 - p ],", + "image_path": "93bd6dcfe55814054a392a1e93d1698968bda89f83b2cd068e4161c9e1cf658e.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 352, + 484, + 484, + 499 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 352, + 484, + 484, + 499 + ], + "spans": [ + { + "bbox": [ + 352, + 484, + 484, + 499 + ], + "type": "interline_equation", + "content": "\\operatorname {S c a l e} _ {\\text {c o r r e c t}} = [ - 2 - p, 2 + p ]", + "image_path": "532a034727503e5608a8bd7a09ecdb3d9823074f2dd3b1e2057a3462a3c6bf07.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 311, + 504, + 526, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 311, + 504, + 526, + 558 + ], + "spans": [ + { + "bbox": [ + 311, + 504, + 526, + 558 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 311, + 504, + 526, + 558 + ], + "type": "inline_equation", + "content": "p \\in [0,1]" + }, + { + "bbox": [ + 311, + 504, + 526, + 558 + ], + "type": "text", + "content": " similarly represents the normalized training progress. This design ensures a smooth shift of learning focus from format fidelity to correctness." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 562, + 525, + 724 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 562, + 525, + 724 + ], + "spans": [ + { + "bbox": [ + 302, + 562, + 525, + 724 + ], + "type": "text", + "content": "We present the reward dynamics of the original and two dynamic scaling strategies in Figure 10. As shown in Table 6, the Two stage (Coarse) reward setting unexpectedly leads to a drop in performance, whereas the Dynamic (Finegrained) scaling could improve model's benchmarking performance. These findings suggest that abrupt shifts in reward scale may negatively impact the training dynamics. In contrast, a smoother and gradual transition from simpler objectives to more nuanced ones appears to better support the model's learning trajectory and generalization during GRPO training." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 74, + 74, + 89, + 89 + ], + "blocks": [ + { + "bbox": [ + 74, + 74, + 89, + 89 + ], + "lines": [ + { + "bbox": [ + 74, + 74, + 89, + 89 + ], + "spans": [ + { + "bbox": [ + 74, + 74, + 89, + 89 + ], + "type": "image", + "image_path": "ff3197de12e4e5c6e186807989a91719f5b1f0d72bc4490312c2ad49c44e81f4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 91, + 75, + 278, + 155 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 91, + 75, + 278, + 155 + ], + "spans": [ + { + "bbox": [ + 91, + 75, + 278, + 155 + ], + "type": "text", + "content": "Takeaway 2: Gradually adjusting reward scales during training, rather than abrupt changes, better supports model learning and generalization, highlighting the benefits of a smoother transition from simpler objectives to more complex ones." 
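To make the interpolation concrete, here is a small Python sketch of the two schedules above; it assumes a raw reward is first normalized to [-1, 1] and then mapped onto the current interval, and the helper names are hypothetical rather than the authors' code.

```python
def dynamic_scales(step: int, total_steps: int):
    """Scale_format = [-2 + p, 2 - p], Scale_correct = [-2 - p, 2 + p].
    At p = 0 both intervals are [-2, 2]; by p = 1 the format scale has
    narrowed to [-1, 1] while the correctness scale has widened to [-3, 3]."""
    p = step / total_steps  # normalized training progress in [0, 1]
    return (-2 + p, 2 - p), (-2 - p, 2 + p)

def rescale(score: float, interval: tuple) -> float:
    """Linearly map a score normalized to [-1, 1] onto the given interval."""
    lo, hi = interval
    return lo + (score + 1.0) * (hi - lo) / 2.0
```

The per-step change in each bound is only 1/total_steps, which is the smoothness property credited above for avoiding the instability of the abrupt two-stage switch.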
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 183, + 233, + 196 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 183, + 233, + 196 + ], + "spans": [ + { + "bbox": [ + 67, + 183, + 233, + 196 + ], + "type": "text", + "content": "5.3 Effect of Reward Granularity" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 200, + 289, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 200, + 289, + 348 + ], + "spans": [ + { + "bbox": [ + 67, + 200, + 289, + 348 + ], + "type": "text", + "content": "We now perform a detailed analysis of the effect of reward granularity, focusing specifically on the correctness reward. Tool calling, by nature, poses challenges for reward assignment, as it involves multiple facets beyond a single definitive answer (e.g., in contrast to math reasoning tasks). Our original reward design decomposes correctness into matching the tool name, parameter names, and parameter values, offering a finegrained, \"process-oriented\" signal that reflects partial correctness in tool usage." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 349, + 291, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 349, + 291, + 390 + ], + "spans": [ + { + "bbox": [ + 67, + 349, + 291, + 390 + ], + "type": "text", + "content": "To assess the impact of this granularity, we evaluate three alternative reward formulations with progressively coarser levels of aggregation:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 392, + 291, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 392, + 291, + 433 + ], + "spans": [ + { + "bbox": [ + 69, + 392, + 291, + 433 + ], + "type": "text", + "content": "- Finegrained: We apply strict exact-match constraints to both tool name and parameter name matching. Specifically, we define:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 125, + 444, + 241, + 456 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 125, + 444, + 241, + 456 + ], + "spans": [ + { + "bbox": [ + 125, + 444, + 241, + 456 + ], + "type": "interline_equation", + "content": "r _ {\\text {n a m e}} = \\mathbb {1} \\left[ N _ {G} = N _ {P} \\right] \\in \\{0, 1 \\}", + "image_path": "940999cad06089578cb4662d7f8130594b2b9bbbb2af31ed89b67bfbc0443260.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 89, + 460, + 277, + 485 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 460, + 277, + 485 + ], + "spans": [ + { + "bbox": [ + 89, + 460, + 277, + 485 + ], + "type": "interline_equation", + "content": "r _ {\\text {p a r a m}} = \\sum_ {G _ {j} \\in G} \\mathbb {1} \\left[ \\operatorname {k e y s} \\left(P _ {G}\\right) = \\operatorname {k e y s} \\left(P _ {P}\\right) \\right] \\in [ 0, | G | ]", + "image_path": "4d9febdcdc369a70cb6fae49eebb8354f7837f049c7619901fe6f0342b21bc05.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 494, + 290, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 494, + 290, + 549 + ], + "spans": [ + { + "bbox": [ + 69, + 494, + 290, + 549 + ], + "type": "text", + "content": "- Intermediate: We combine the parameter name and value rewards into a single term that enforces an exact match on the entire parameter dictionary. 
Formally:" + } ] } ], "index": 8 }, { "bbox": [ 98, 558, 268, 582 ], "type": "interline_equation", "angle": 0, "lines": [ { "bbox": [ 98, 558, 268, 582 ], "spans": [ { "bbox": [ 98, 558, 268, 582 ], "type": "interline_equation", "content": "r_{\\text{param}} + r_{\\text{value}} = \\sum_{G_{j} \\in G} \\mathbb{1}\\left[ P_{G} = P_{P} \\right] \\in [0, |G|]", "image_path": "fd6386c610ee3b3acf1b352d1ca8d04bed553bb41efae56fabf6f31c51a1a935.jpg" } ] } ], "index": 9 }, { "bbox": [ 69, 592, 290, 659 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 69, 592, 290, 659 ], "spans": [ { "bbox": [ 69, 592, 290, 659 ], "type": "text", "content": "- Coarse: At the coarsest level, we fully entangle tool name, parameter names, and parameter values, treating the entire tool set as a unit. Reward is given only if the generated tool set exactly matches the ground truth:" } ] } ], "index": 10 }, { "bbox": [ 101, 671, 265, 683 ], "type": "interline_equation", "angle": 0, "lines": [ { "bbox": [ 101, 671, 265, 683 ], "spans": [ { "bbox": [ 101, 671, 265, 683 ], "type": "interline_equation", "content": "r_{\\text{name}} + r_{\\text{param}} + r_{\\text{value}} = \\mathbb{1}[G = P] \\in \\{0, 1\\}", "image_path": "8639a9e3198136a210a65fcf37f9deaf9d23ec0995494894eb1409d6634af3bc.jpg" } ] } ], "index": 11 }, { "bbox": [ 67, 693, 291, 773 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 67, 693, 291, 773 ], "spans": [ { "bbox": [ 67, 693, 291, 773 ], "type": "text", "content": "All other aspects of reward computation are kept identical to those described in Section 3.3. Starting from our original design, which is the most finegrained, we progressively entangle reward components to derive increasingly coarse-grained alternatives." } ] } ], "index": 12 }, { "type": "image", "bbox": [ 305, 69, 523, 150 ], "blocks": [ { "bbox": [ 305, 69, 523, 150 ], "lines": [ { "bbox": [ 305, 69, 523, 150 ], "spans": [ { "bbox": [ 305, 69, 523, 150 ], "type": "image", "image_path": "552649707e12fd059580188a20cb855e21ec6e4f969add3bd77cecc592a62b4e.jpg" } ] } ], "index": 13, "angle": 0, "type": "image_body" }, { "bbox": [ 302, 159, 524, 196 ], "lines": [ { "bbox": [ 302, 159, 524, 196 ], "spans": [ { "bbox": [ 302, 159, 524, 196 ], "type": "text", "content": "Figure 11: Correctness reward trends across training steps for Qwen2.5-3B-Instruct with different reward granularity." } ] } ], "index": 14, "angle": 0, "type": "image_caption" } ], "index": 13 }, { "bbox": [ 302, 216, 525, 311 ], "type": "text", "angle": 0, "lines": [ { "bbox": [ 302, 216, 525, 311 ], "spans": [ { "bbox": [ 302, 216, 525, 311 ], "type": "text", "content": "The reward dynamics across training steps, shown in Figure 11, demonstrate that as reward granularity becomes coarser, it becomes harder for the model to achieve higher reward values during RL training. This suggests that overly strict and entangled rewards may lead to sparse learning signals, potentially hindering effective credit assignment."
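To make the three formulations concrete, here is a small sketch in Python, assuming each tool call is represented as a (name, parameter-dict) pair and that the ground-truth calls G and predicted calls P are aligned by position; the helper names and data representation are hypothetical, not the paper's code:

```python
from typing import Dict, List, Tuple

ToolCall = Tuple[str, Dict[str, object]]  # (tool name, parameter dict)

def finegrained(G: List[ToolCall], P: List[ToolCall]) -> Tuple[int, int]:
    # Separate credit for tool names and for parameter-name sets;
    # parameter values would be scored by a further term, r_value.
    r_name = int([g[0] for g in G] == [p[0] for p in P])            # in {0, 1}
    r_param = sum(g[1].keys() == p[1].keys() for g, p in zip(G, P)) # in [0, |G|]
    return r_name, r_param

def intermediate(G: List[ToolCall], P: List[ToolCall]) -> int:
    # Entangles parameter names and values: exact match per parameter dict.
    return sum(g[1] == p[1] for g, p in zip(G, P))  # in [0, |G|]

def coarse(G: List[ToolCall], P: List[ToolCall]) -> int:
    # All-or-nothing: reward only if the whole tool set matches.
    return int(G == P)  # in {0, 1}
```

Each coarser variant collapses more of the partial-credit structure, which is what makes its reward signal sparser during training.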
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 312, + 525, + 406 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 312, + 525, + 406 + ], + "spans": [ + { + "bbox": [ + 302, + 312, + 525, + 406 + ], + "type": "text", + "content": "Empirical results in Table 7 further support this insight: our original, most finegrained reward strategy performs well across models. In general, finer-grained reward decomposition leads to better training outcomes and higher final task performance, indicating its advantage in promoting more stable and effective policy learning." + } + ] + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 308, + 415, + 324, + 430 + ], + "blocks": [ + { + "bbox": [ + 308, + 415, + 324, + 430 + ], + "lines": [ + { + "bbox": [ + 308, + 415, + 324, + 430 + ], + "spans": [ + { + "bbox": [ + 308, + 415, + 324, + 430 + ], + "type": "image", + "image_path": "1454ca022118595022cec19aa91d5512614820037c89daf0cd26bb64d0b07139.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "bbox": [ + 325, + 416, + 515, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 416, + 515, + 497 + ], + "spans": [ + { + "bbox": [ + 325, + 416, + 515, + 497 + ], + "type": "text", + "content": "Takeaway 3: Finegrained reward decomposition provides richer learning signals, highlighting its role in enabling more effective training compared to coarse reward formulations, which can impede progress and degrade final performance." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 302, + 524, + 381, + 536 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 524, + 381, + 536 + ], + "spans": [ + { + "bbox": [ + 302, + 524, + 381, + 536 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 302, + 544, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 544, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 544, + 525, + 775 + ], + "type": "text", + "content": "In this paper, we present a reward design tailored for GRPO training on tool use tasks. Empirically, our model trained from scratch using GRPO consistently outperforms both SFT-based and SFT-initialized RL baselines, as well as models trained with alternative RL algorithms, across a variety of held-out tool use benchmarks. Furthermore, we demonstrate that our model generalizes well to QA settings, exhibiting robust multi-turn interactions, emergent proactiveness, and metacognitive behaviors, all of which are key traits for efficient and adaptable tool use, lying at the core of foundational agent capabilities. Our in-depth analysis of reward types, scaling strategies, granularity, and temporal dynamics provides further insights into how reward shaping influences learning and behavior. We hope these findings serve as a roadmap for future work" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 71, + 68, + 523, + 185 + ], + "blocks": [ + { + "bbox": [ + 71, + 68, + 523, + 185 + ], + "lines": [ + { + "bbox": [ + 71, + 68, + 523, + 185 + ], + "spans": [ + { + "bbox": [ + 71, + 68, + 523, + 185 + ], + "type": "table", + "html": "
ModelOverall AccNon-Live AST AccNon-Live Exec AccLive AccMulti Turn AccRelevance DetectionIrrelevance Detection
Qwen2.5-1.5B-Instruct (Original)46.20%77.96%76.98%60.73%2.25%100.00%56.44%
Qwen2.5-1.5B-Instruct (Finegrained)40.71%78.00%75.55%48.91%2.00%100.00%24.84%
Qwen2.5-1.5B-Instruct (Intermediate)37.65%77.94%72.46%43.00%1.62%100.00%12.45%
Qwen2.5-1.5B-Instruct (Coarse)36.72%76.44%70.86%41.27%2.12%100.00%12.24%
Qwen2.5-3B-Instruct (Original)52.98%81.58%79.43%73.78%3.75%88.24%84.85%
Qwen2.5-3B-Instruct (Finegrained)52.06%81.65%79.64%69.21%5.50%83.33%78.14%
Qwen2.5-3B-Instruct (Intermediate)51.36%81.15%80.07%68.64%4.25%88.89%75.74%
Qwen2.5-3B-Instruct (Coarse)51.40%79.48%78.54%68.73%5.62%88.89%77.80%
Llama-3.2-3B-Instruct (Original)44.10%74.38%75.18%56.86%1.37%94.44%62.23%
Llama-3.2-3B-Instruct (Finegrained)39.82%64.71%70.68%52.20%0.25%100.00%56.68%
Llama-3.2-3B-Instruct (Intermediate)38.62%59.83%71.86%50.56%0.25%94.44%55.68%
Llama-3.2-3B-Instruct (Coarse)35.95%52.00%61.43%48.96%1.12%83.33%61.92%
", + "image_path": "c0decb62c717c5b9e2ed93474a6147510ea0da8e44b962ada76b0687d177fa8e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 189, + 194, + 403, + 206 + ], + "lines": [ + { + "bbox": [ + 189, + 194, + 403, + 206 + ], + "spans": [ + { + "bbox": [ + 189, + 194, + 403, + 206 + ], + "type": "text", + "content": "Table 7: BFCL V3 Benchmark Results (Granularity)" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 227, + 291, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 227, + 291, + 282 + ], + "spans": [ + { + "bbox": [ + 67, + 227, + 291, + 282 + ], + "type": "text", + "content": "in applying reinforcement learning to tool use. Ultimately, we envision that reward is all tool learning needs, and that RL offers a powerful path toward generalizable and creative agent behavior." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 306, + 127, + 318 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 306, + 127, + 318 + ], + "spans": [ + { + "bbox": [ + 68, + 306, + 127, + 318 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 327, + 291, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 327, + 291, + 403 + ], + "spans": [ + { + "bbox": [ + 69, + 327, + 291, + 403 + ], + "type": "text", + "content": "Emre Can Acikgoz, Jeremiah Greer, Akul Datta, Ze Yang, William Zeng, Oussama Elachqar, Emmanuel Koukoumidis, Dilek Hakkani-Tur, and Gokhan Tur. 2025. Can a single model master both multi-turn conversations and tool use? coalm: A unified conversational agentic language model. Preprint, arXiv:2502.08820." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 414, + 291, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 414, + 291, + 469 + ], + "spans": [ + { + "bbox": [ + 68, + 414, + 291, + 469 + ], + "type": "text", + "content": "Jinheon Baek, Sujay Kumar Jauhar, Silviu Cucerzan, and Sung Ju Hwang. 2024. Researchagent: Iterative research idea generation over scientific literature with large language models. arXiv preprint arXiv:2404.07738." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 479, + 291, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 479, + 291, + 524 + ], + "spans": [ + { + "bbox": [ + 68, + 479, + 291, + 524 + ], + "type": "text", + "content": "Baian Chen, Chang Shu, Ehsan Shareghi, Nigel Collier, Karthik Narasimhan, and Shunyu Yao. 2023a. Fireact: Toward language agent fine-tuning. arXiv preprint arXiv:2310.05915." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 534, + 291, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 534, + 291, + 578 + ], + "spans": [ + { + "bbox": [ + 68, + 534, + 291, + 578 + ], + "type": "text", + "content": "Nuo Chen, Hongguang Li, Baoyuan Wang, and Jia Li. 2023b. From good to great: Improving math reasoning with tool-augmented interleaf prompting. arXiv preprint arXiv:2401.05384." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 588, + 291, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 588, + 291, + 642 + ], + "spans": [ + { + "bbox": [ + 68, + 588, + 291, + 642 + ], + "type": "text", + "content": "Wenhu Chen, Xueguang Ma, Xinyi Wang, and William W Cohen. 2022. 
Program of thoughts prompting: Disentangling computation from reasoning for numerical reasoning tasks. arXiv preprint arXiv:2211.12588." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 653, + 291, + 742 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 653, + 291, + 742 + ], + "spans": [ + { + "bbox": [ + 68, + 653, + 291, + 742 + ], + "type": "text", + "content": "Zehui Chen, Kuikun Liu, Qiuchen Wang, Wenwei Zhang, Jiangning Liu, Dahua Lin, Kai Chen, and Feng Zhao. 2024. Agent-FLAN: Designing data and methods of effective agent tuning for large language models. In *Findings of the Association for Computational Linguistics: ACL* 2024, pages 9354–9366, Bangkok, Thailand. Association for Computational Linguistics." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 751, + 291, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 751, + 291, + 774 + ], + "spans": [ + { + "bbox": [ + 68, + 751, + 291, + 774 + ], + "type": "text", + "content": "Tianzhe Chu, Yuexiang Zhai, Jihan Yang, Shengbang Tong, Saining Xie, Dale Schuurmans, Quoc V" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 228, + 526, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 228, + 526, + 273 + ], + "spans": [ + { + "bbox": [ + 313, + 228, + 526, + 273 + ], + "type": "text", + "content": "Le, Sergey Levine, and Yi Ma. 2025. Sft memorizes, rl generalizes: A comparative study of foundation model post-training. arXiv preprint arXiv:2501.17161." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 303, + 282, + 525, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 282, + 525, + 317 + ], + "spans": [ + { + "bbox": [ + 303, + 282, + 525, + 317 + ], + "type": "text", + "content": "Quy-Anh Dang and Chris Ngo. 2025. Reinforcement learning for reasoning in small llms: What works and what doesn't. arXiv preprint arXiv:2503.16219." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 303, + 326, + 525, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 326, + 525, + 382 + ], + "spans": [ + { + "bbox": [ + 303, + 326, + 525, + 382 + ], + "type": "text", + "content": "Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. 2024. The llama 3 herd of models. arXiv preprint arXiv:2407.21783." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 303, + 391, + 525, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 391, + 525, + 446 + ], + "spans": [ + { + "bbox": [ + 303, + 391, + 525, + 446 + ], + "type": "text", + "content": "Zhibin Gou, Zhihong Shao, Yeyun Gong, Yelong Shen, Yujiu Yang, Minlie Huang, Nan Duan, and Weizhu Chen. 2023. Tora: A tool-integrated reasoning agent for mathematical problem solving. arXiv preprint arXiv:2309.17452." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 303, + 457, + 525, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 457, + 525, + 513 + ], + "spans": [ + { + "bbox": [ + 303, + 457, + 525, + 513 + ], + "type": "text", + "content": "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 303, + 523, + 525, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 523, + 525, + 577 + ], + "spans": [ + { + "bbox": [ + 303, + 523, + 525, + 577 + ], + "type": "text", + "content": "Jiangyong Huang, Silong Yong, Xiaojian Ma, Xiongkun Linghu, Puhao Li, Yan Wang, Qing Li, Song-Chun Zhu, Baoxiong Jia, and Siyuan Huang. 2023. An embodied generalist agent in 3d world. arXiv preprint arXiv:2311.12871." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 303, + 588, + 525, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 588, + 525, + 655 + ], + "spans": [ + { + "bbox": [ + 303, + 588, + 525, + 655 + ], + "type": "text", + "content": "Zhen Huang, Haoyang Zou, Xuefeng Li, Yixiu Liu, Yuxiang Zheng, Ethan Chern, Shijie Xia, Yiwei Qin, Weizhe Yuan, and Pengfei Liu. 2024. O1 replication journey-part 2: Surpassing o1-preview through simple distillation, big progress or bitter lesson? arXiv preprint arXiv:2411.16489." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 303, + 664, + 525, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 664, + 525, + 708 + ], + "spans": [ + { + "bbox": [ + 303, + 664, + 525, + 708 + ], + "type": "text", + "content": "Yoshitaka Inoue, Tianci Song, and Tianfan Fu. 2024. Drugagent: Explainable drug repurposing agent with large language model-based reasoning. arXiv preprint arXiv:2408.13378." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 303, + 719, + 525, + 773 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 719, + 525, + 773 + ], + "spans": [ + { + "bbox": [ + 303, + 719, + 525, + 773 + ], + "type": "text", + "content": "Bowen Jin, Hansi Zeng, Zhenrui Yue, Dong Wang, Hamed Zamani, and Jiawei Han. 2025. Search: Training lms to reason and leverage search engines with reinforcement learning. arXiv preprint arXiv:2503.09516." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 289, + 772 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 69, + 72, + 289, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 289, + 116 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 289, + 116 + ], + "type": "text", + "content": "Minki Kang, Jongwon Jeong, and Jaewoong Cho. 2025. T1: Tool-integrated self-verification for test-time compute scaling in small language models. arXiv preprint arXiv:2504.04718." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 125, + 289, + 169 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 125, + 289, + 169 + ], + "spans": [ + { + "bbox": [ + 69, + 125, + 289, + 169 + ], + "type": "text", + "content": "Timo Kaufmann, Paul Weng, Viktor Bengs, and Eyke Hüllermeier. 2023. A survey of reinforcement learning from human feedback. arXiv preprint arXiv:2312.14925." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 178, + 289, + 244 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 178, + 289, + 244 + ], + "spans": [ + { + "bbox": [ + 69, + 178, + 289, + 244 + ], + "type": "text", + "content": "Komal Kumar, Tajamul Ashraf, Omkar Thawakar, Rao Muhammad Anwer, Hisham Cholakkal, Mubarak Shah, Ming-Hsuan Yang, Phillip HS Torr, Salman Khan, and Fahad Shahbaz Khan. 2025. 
Lm post-training: A deep dive into reasoning large language models. arXiv preprint arXiv:2502.21321." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 252, + 289, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 252, + 289, + 319 + ], + "spans": [ + { + "bbox": [ + 69, + 252, + 289, + 319 + ], + "type": "text", + "content": "Minghao Li, Yingxiu Zhao, Bowen Yu, Feifan Song, Hangyu Li, Haiyang Yu, Zhoujun Li, Fei Huang, and Yongbin Li. 2023. Api-bank: A comprehensive benchmark for tool-augmented llms. In Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, pages 3102-3116." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 327, + 289, + 359 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 327, + 289, + 359 + ], + "spans": [ + { + "bbox": [ + 69, + 327, + 289, + 359 + ], + "type": "text", + "content": "Xuefeng Li, Haoyang Zou, and Pengfei Liu. 2025a. Limr: Less is more for rl scaling. arXiv preprint arXiv:2502.11886." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 369, + 289, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 369, + 289, + 401 + ], + "spans": [ + { + "bbox": [ + 69, + 369, + 289, + 401 + ], + "type": "text", + "content": "Xuefeng Li, Haoyang Zou, and Pengfei Liu. 2025b. Torl: Scaling tool-integrated rl. arXiv preprint arXiv:2503.23383." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 410, + 289, + 455 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 410, + 289, + 455 + ], + "spans": [ + { + "bbox": [ + 69, + 410, + 289, + 455 + ], + "type": "text", + "content": "Minpeng Liao, Wei Luo, Chengxi Li, Jing Wu, and Kai Fan. 2024. Mario: Math reasoning with code interpreter output-a reproducible pipeline. arXiv preprint arXiv:2401.08190." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 464, + 289, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 464, + 289, + 528 + ], + "spans": [ + { + "bbox": [ + 69, + 464, + 289, + 528 + ], + "type": "text", + "content": "Qiqiang Lin, Muning Wen, Qiuying Peng, Guanyu Nie, Junwei Liao, Jun Wang, Xiaoyun Mo, Jiamu Zhou, Cheng Cheng, Yin Zhao, et al. 2024. Hammer: Robust function-calling for on-device language models via function masking. arXiv preprint arXiv:2410.04587." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 538, + 289, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 538, + 289, + 604 + ], + "spans": [ + { + "bbox": [ + 69, + 538, + 289, + 604 + ], + "type": "text", + "content": "Chen Ling, Xujiang Zhao, Jiaying Lu, Chengyuan Deng, Can Zheng, Junxiang Wang, Tanmoy Chowdhury, Yun Li, Hejie Cui, Xuchao Zhang, et al. 2023. Domain specialization as the key to make large language models disruptive: A comprehensive survey. arXiv preprint arXiv:2305.18703." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 613, + 289, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 613, + 289, + 668 + ], + "spans": [ + { + "bbox": [ + 69, + 613, + 289, + 668 + ], + "type": "text", + "content": "Weiwen Liu, Xu Huang, Xingshan Zeng, Xinlong Hao, Shuai Yu, Dexun Li, Shuai Wang, Weinan Gan, Zhengying Liu, Yuanqing Yu, et al. 2024. Toolace: Winning the points of llm function calling. arXiv preprint arXiv:2409.00920." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 677, + 289, + 720 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 677, + 289, + 720 + ], + "spans": [ + { + "bbox": [ + 69, + 677, + 289, + 720 + ], + "type": "text", + "content": "Yu Meng, Mengzhou Xia, and Danqi Chen. 2024. Simpo: Simple preference optimization with a reference-free reward. Advances in Neural Information Processing Systems, 37:124198-124235." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 729, + 289, + 772 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 729, + 289, + 772 + ], + "spans": [ + { + "bbox": [ + 69, + 729, + 289, + 772 + ], + "type": "text", + "content": "Shishir G Patil, Tianjun Zhang, Xin Wang, and Joseph E Gonzalez. 2023. Gorilla: Large language model connected with massive apis. arXiv preprint arXiv:2305.15334." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 524, + 774 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 304, + 72, + 524, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 524, + 116 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 524, + 116 + ], + "type": "text", + "content": "Shishir G Patil, Tianjun Zhang, Xin Wang, and Joseph E Gonzalez. 2024. Gorilla: Large language model connected with massive apis. Advances in Neural Information Processing Systems, 37:126544-126565." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 126, + 524, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 126, + 524, + 171 + ], + "spans": [ + { + "bbox": [ + 304, + 126, + 524, + 171 + ], + "type": "text", + "content": "Ofir Press, Muru Zhang, Sewon Min, Ludwig Schmidt, Noah A Smith, and Mike Lewis. 2022. Measuring and narrowing the compositionality gap in language models. arXiv preprint arXiv:2210.03350." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 181, + 524, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 181, + 524, + 236 + ], + "spans": [ + { + "bbox": [ + 304, + 181, + 524, + 236 + ], + "type": "text", + "content": "Cheng Qian, Emre Can Acikgoz, Hongru Wang, Xiusi Chen, Avirup Sil, Dilek Hakkani-Tur, Gokhan Tur, and Heng Ji. 2025. Smart: Self-aware agent for tool overuse mitigation. arXiv preprint arXiv:2502.11435." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 248, + 524, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 248, + 524, + 312 + ], + "spans": [ + { + "bbox": [ + 304, + 248, + 524, + 312 + ], + "type": "text", + "content": "Cheng Qian, Chi Han, Yi Fung, Yujia Qin, Zhiyuan Liu, and Heng Ji. 2023. Creator: Tool creation for disentangling abstract and concrete reasoning of large language models. In *Findings of the Association for Computational Linguistics: EMNLP* 2023, pages 6922-6939." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 324, + 524, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 324, + 524, + 380 + ], + "spans": [ + { + "bbox": [ + 304, + 324, + 524, + 380 + ], + "type": "text", + "content": "Cheng Qian, Peixuan Han, Qinyu Luo, Bingxiang He, Xiusi Chen, Yuji Zhang, Hongyi Du, Jiarui Yao, Xiaocheng Yang, Denghui Zhang, et al. 2024a. Escapebench: Pushing language models to think outside the box. arXiv preprint arXiv:2412.13549." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 390, + 524, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 390, + 524, + 477 + ], + "spans": [ + { + "bbox": [ + 304, + 390, + 524, + 477 + ], + "type": "text", + "content": "Cheng Qian, Chenyan Xiong, Zhenghao Liu, and Zhiyuan Liu. 2024b. Toolink: Linking toolkit creation and using through chain-of-solving on open-source model. In Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 831-854." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 488, + 524, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 488, + 524, + 543 + ], + "spans": [ + { + "bbox": [ + 304, + 488, + 524, + 543 + ], + "type": "text", + "content": "Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, et al. 2024a. O1 replication journey: A strategic progress report-part 1. arXiv preprint arXiv:2410.18982." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 554, + 524, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 554, + 524, + 608 + ], + "spans": [ + { + "bbox": [ + 304, + 554, + 524, + 608 + ], + "type": "text", + "content": "Yujia Qin, Shengding Hu, Yankai Lin, Weize Chen, Ning Ding, Ganqu Cui, Zheni Zeng, Yufei Huang, Chaojun Xiao, Chi Han, et al. 2023. Tool learning with foundation models. arXiv preprint arXiv.2304.08354, 10." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 619, + 524, + 675 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 619, + 524, + 675 + ], + "spans": [ + { + "bbox": [ + 304, + 619, + 524, + 675 + ], + "type": "text", + "content": "Yujia Qin, Shengding Hu, Yankai Lin, Weize Chen, Ning Ding, Ganqu Cui, Zheni Zeng, Xuanhe Zhou, Yufei Huang, Chaojun Xiao, et al. 2024b. Tool learning with foundation models. ACM Computing Surveys, 57(4):1-40." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 686, + 524, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 686, + 524, + 774 + ], + "spans": [ + { + "bbox": [ + 304, + 686, + 524, + 774 + ], + "type": "text", + "content": "Yujia Qin, Shihao Liang, Yining Ye, Kunlun Zhu, Lan Yan, Yaxi Lu, Yankai Lin, Xin Cong, Xiangru Tang, Bill Qian, Sihan Zhao, Lauren Hong, Runchu Tian, Ruobing Xie, Jie Zhou, Mark Gerstein, Dahai Li, Zhiyuan Liu, and Maosong Sun. 2024c. Toollm: Facilitating large language models to master 16000+ real-world apis. In The Twelfth International Conference on Learning Representations." + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 773 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 291, + 138 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 291, + 138 + ], + "type": "text", + "content": "Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. 2023. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 149, + 291, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 149, + 291, + 205 + ], + "spans": [ + { + "bbox": [ + 69, + 149, + 291, + 205 + ], + "type": "text", + "content": "Yusuf Roohani, Andrew Lee, Qian Huang, Jian Vora, Zachary Steinhart, Kexin Huang, Alexander Marson, Percy Liang, and Jure Leskovec. 2024. Biodiscoveryagent: An ai agent for designing genetic perturbation experiments. arXiv preprint arXiv:2405.17631." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 216, + 291, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 216, + 291, + 282 + ], + "spans": [ + { + "bbox": [ + 69, + 216, + 291, + 282 + ], + "type": "text", + "content": "Timo Schick, Jane Dwivedi-Yu, Roberto Dessì, Roberta Raileanu, Maria Lomeli, Eric Hambro, Luke Zettle-moyer, Nicola Cancedda, and Thomas Scialom. 2023. Toolformer: Language models can teach themselves to use tools. Advances in Neural Information Processing Systems, 36:68539-68551." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 294, + 290, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 294, + 290, + 338 + ], + "spans": [ + { + "bbox": [ + 69, + 294, + 290, + 338 + ], + "type": "text", + "content": "John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. 2017. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 349, + 290, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 349, + 290, + 406 + ], + "spans": [ + { + "bbox": [ + 69, + 349, + 290, + 406 + ], + "type": "text", + "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. 2024. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 417, + 290, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 417, + 290, + 472 + ], + "spans": [ + { + "bbox": [ + 69, + 417, + 290, + 472 + ], + "type": "text", + "content": "Haozhan Shen, Peng Liu, Jingcheng Li, Chunxin Fang, Yibo Ma, Jiajia Liao, Qiaoli Shen, Zilun Zhang, Kangjia Zhao, Qianqian Zhang, et al. 2025. Vlmr1: A stable and generalizable r1-style large vision-language model. arXiv preprint arXiv:2504.07615." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 483, + 290, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 483, + 290, + 538 + ], + "spans": [ + { + "bbox": [ + 69, + 483, + 290, + 538 + ], + "type": "text", + "content": "Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. 2024. Hybridflow: A flexible and efficient rlhf framework. arXiv preprint arXiv:2409.19256." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 550, + 290, + 606 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 550, + 290, + 606 + ], + "spans": [ + { + "bbox": [ + 69, + 550, + 290, + 606 + ], + "type": "text", + "content": "Huatong Song, Jinhao Jiang, Yingqian Min, Jie Chen, Zhipeng Chen, Wayne Xin Zhao, Lei Fang, and Ji-Rong Wen. 2025. R1-searcher: Incentivizing the search capability in llms via reinforcement learning. arXiv preprint arXiv:2503.05592." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 618, + 290, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 618, + 290, + 673 + ], + "spans": [ + { + "bbox": [ + 69, + 618, + 290, + 673 + ], + "type": "text", + "content": "Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. 2025. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 684, + 290, + 706 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 684, + 290, + 706 + ], + "spans": [ + { + "bbox": [ + 69, + 684, + 290, + 706 + ], + "type": "text", + "content": "Qwen Team. 2024. Qwen2.5: A party of foundation models." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 718, + 290, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 718, + 290, + 773 + ], + "spans": [ + { + "bbox": [ + 69, + 718, + 290, + 773 + ], + "type": "text", + "content": "Tu Vu, Mohit Iyyer, Xuezhi Wang, Noah Constant, Jerry Wei, Jason Wei, Chris Tar, Yun-Hsuan Sung, Denny Zhou, Quoc Le, et al. 2023. Freshllms: Refreshing large language models with search engine augmentation. arXiv preprint arXiv:2310.03214." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 525, + 773 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 305, + 72, + 525, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 72, + 525, + 138 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 525, + 138 + ], + "type": "text", + "content": "Yidong Wang, Qi Guo, Wenjin Yao, Hongbo Zhang, Xin Zhang, Zhen Wu, Meishan Zhang, Xinyu Dai, Qingsong Wen, Wei Ye, et al. 2024. Autosurvey: Large language models can automatically write surveys. Advances in Neural Information Processing Systems, 37:115119-115145." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 145, + 525, + 200 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 145, + 525, + 200 + ], + "spans": [ + { + "bbox": [ + 304, + 145, + 525, + 200 + ], + "type": "text", + "content": "Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. 2025. Logic-rl: Unleashing llm reasoning with rule-based reinforcement learning. arXiv preprint arXiv:2502.14768." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 208, + 525, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 208, + 525, + 263 + ], + "spans": [ + { + "bbox": [ + 304, + 208, + 525, + 263 + ], + "type": "text", + "content": "Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik R Narasimhan, and Yuan Cao. 2023. React: Synergizing reasoning and acting in language models. In The Eleventh International Conference on Learning Representations." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 270, + 525, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 270, + 525, + 325 + ], + "spans": [ + { + "bbox": [ + 304, + 270, + 525, + 325 + ], + "type": "text", + "content": "Yining Ye, Xin Cong, Shizuo Tian, Yujia Qin, Chong Liu, Yankai Lin, Zhiyuan Liu, and Maosong Sun. 2023. Rational decision-making agent with internalized utility judgment. arXiv preprint arXiv:2308.12519." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 333, + 525, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 333, + 525, + 388 + ], + "spans": [ + { + "bbox": [ + 304, + 333, + 525, + 388 + ], + "type": "text", + "content": "Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. 2025. Dapo: An opensource llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 395, + 525, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 395, + 525, + 450 + ], + "spans": [ + { + "bbox": [ + 304, + 395, + 525, + 450 + ], + "type": "text", + "content": "Yuanqing Yu, Zhefan Wang, Weizhi Ma, Zhicheng Guo, Jingtao Zhan, Shuai Wang, Chuhan Wu, Zhiqiang Guo, and Min Zhang. 2024. Steptool: A step-grained reinforcement learning framework for tool learning in llms. arXiv preprint arXiv:2410.07745." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 457, + 525, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 457, + 525, + 523 + ], + "spans": [ + { + "bbox": [ + 304, + 457, + 525, + 523 + ], + "type": "text", + "content": "Yufeng Yuan, Qiying Yu, Xiaochen Zuo, Ruofei Zhu, Wenyuan Xu, Jiaze Chen, Chengyi Wang, TianTian Fan, Zhengyin Du, Xiangpeng Wei, et al. 2025. Vapo: Efficient and reliable reinforcement learning for advanced reasoning tasks. arXiv preprint arXiv:2504.05118." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 531, + 525, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 531, + 525, + 597 + ], + "spans": [ + { + "bbox": [ + 304, + 531, + 525, + 597 + ], + "type": "text", + "content": "Aohan Zeng, Mingdao Liu, Rui Lu, Bowen Wang, Xiao Liu, Yuxiao Dong, and Jie Tang. 2024. AgentTuning: Enabling generalized agent abilities for LLMs. In Findings of the Association for Computational Linguistics: ACL 2024, pages 3053-3077, Bangkok, Thailand. Association for Computational Linguistics." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 604, + 525, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 604, + 525, + 649 + ], + "spans": [ + { + "bbox": [ + 304, + 604, + 525, + 649 + ], + "type": "text", + "content": "Yuanzhao Zhai, Tingkai Yang, Kele Xu, Feng Dawei, Cheng Yang, Bo Ding, and Huaimin Wang. 2024. Enhancing decision-making for llm agents via step-level q-value models. arXiv preprint arXiv:2409.09345." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 656, + 525, + 711 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 656, + 525, + 711 + ], + "spans": [ + { + "bbox": [ + 304, + 656, + 525, + 711 + ], + "type": "text", + "content": "Hongxin Zhang, Weihua Du, Jiaming Shan, Qinhong Zhou, Yilun Du, Joshua B Tenenbaum, Tianmin Shu, and Chuang Gan. 2023. Building cooperative embodied agents modularly with large language models. arXiv preprint arXiv:2307.02485." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 718, + 525, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 718, + 525, + 773 + ], + "spans": [ + { + "bbox": [ + 304, + 718, + 525, + 773 + ], + "type": "text", + "content": "Jianguo Zhang, Tian Lan, Ming Zhu, Zuxin Liu, Thai Hoang, Shirley Kokane, Weiran Yao, Juntao Tan, Akshara Prabhakar, Haolin Chen, et al. 2024. 
xlam: A family of large action models to empower ai agent systems. arXiv preprint arXiv:2409.03215." + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 291, + 128 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 291, + 128 + ], + "type": "text", + "content": "Yuxiang Zheng, Dayuan Fu, Xiangkun Hu, Xiaojie Cai, Lyumanshan Ye, Pengrui Lu, and Pengfei Liu. 2025. Deepresearch: Scaling deep research via reinforcement learning in real-world environments. arXiv preprint arXiv:2504.03160." + } + ] + } + ], + "index": 0 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 71, + 121, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 71, + 121, + 84 + ], + "spans": [ + { + "bbox": [ + 68, + 71, + 121, + 84 + ], + "type": "text", + "content": "Appendix" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 92, + 196, + 106 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 92, + 196, + 106 + ], + "spans": [ + { + "bbox": [ + 68, + 92, + 196, + 106 + ], + "type": "text", + "content": "A User Prompt Details" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 113, + 291, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 113, + 291, + 195 + ], + "spans": [ + { + "bbox": [ + 67, + 113, + 291, + 195 + ], + "type": "text", + "content": "The system instruction is shown in Figure 4. The user prompt is used to store the trajectory history, including intermediate thoughts, tool calls, environment observations, and any additional user commands. The complete user instruction is presented in Figure 12." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 204, + 190, + 218 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 204, + 190, + 218 + ], + "spans": [ + { + "bbox": [ + 68, + 204, + 190, + 218 + ], + "type": "text", + "content": "B Experiment Details" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 225, + 290, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 225, + 290, + 348 + ], + "spans": [ + { + "bbox": [ + 67, + 225, + 290, + 348 + ], + "type": "text", + "content": "Training Data Details. We empirically use 4K data points for training, as each dataset consists of samples drawn from the same distribution. Adding more data of similar nature does not increase task diversity. Moreover, we observe that increasing the dataset size beyond 4K does not yield noticeable improvements in the training convergence or final performance, suggesting diminishing returns from additional data under this setting." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 354, + 290, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 354, + 290, + 437 + ], + "spans": [ + { + "bbox": [ + 67, + 354, + 290, + 437 + ], + "type": "text", + "content": "GRPO Setting Details. For all the tool calls in the dataset, we all use JSON format to represent tool call as it's easy to parse and is the most general and structure way of performing tool call. 
For the GRPO training, we use 2 A100 (80G) GPUs per run with the following hyper-parameters:" + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 87, + 445, + 271, + 681 + ], + "blocks": [ + { + "bbox": [ + 87, + 445, + 271, + 681 + ], + "lines": [ + { + "bbox": [ + 87, + 445, + 271, + 681 + ], + "spans": [ + { + "bbox": [ + 87, + 445, + 271, + 681 + ], + "type": "table", + "html": "
CategoryHyperparameter
Data Configuration
Train Batch Size512
Validation Batch Size128
Max Prompt Length2048
Max Response Length1024
Optimization
Learning Rate1e-6
PPO Mini Batch Size128
KL Loss UsedFalse
Rollout Configuration
Rollout Namevllm
GPU Memory Utilization0.6
Number of Rollouts4
Training & Logging
Save Frequency (Steps)15
Test Frequency (Steps)5
Total Epochs15
", + "image_path": "d65267e68fdd04a19a99679ce48395fe4605cf2779afc6e66e0b2988f88b2363.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 720, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 720, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 720, + 291, + 775 + ], + "type": "text", + "content": "Baselines. The 400 selected data points used for SFT share the same distribution as the 4k data points used for RL training, but differ in content. For SFT, each data point includes a field," + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 71, + 524, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 524, + 125 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 524, + 125 + ], + "type": "text", + "content": "with thought content distilled from Deepseek-R1 trajectories. In contrast, GRPO does not require ground truth thought, as only the tool calls are used to compute rewards in the GRPO setting." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 126, + 525, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 126, + 525, + 234 + ], + "spans": [ + { + "bbox": [ + 302, + 126, + 525, + 234 + ], + "type": "text", + "content": "We use 400 data points for SFT based on empirical observations that this amount is sufficient to help the raw model learn to follow our tool call format. This provides a stronger initialization and reduces the burden of learning the format from scratch during RL training. However, we also find that relying solely on SFT can lead to overfitting, which may ultimately degrade performance." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 242, + 525, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 242, + 525, + 296 + ], + "spans": [ + { + "bbox": [ + 302, + 242, + 525, + 296 + ], + "type": "text", + "content": "PPO Setting Details. We apply approximately the same parameter settings as GRPO for the PPO training. Similarly, we use 2 A100 (80G) GPUs per run with the following hyper-parameters:" + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 323, + 305, + 506, + 560 + ], + "blocks": [ + { + "bbox": [ + 92, + 687, + 265, + 700 + ], + "lines": [ + { + "bbox": [ + 92, + 687, + 265, + 700 + ], + "spans": [ + { + "bbox": [ + 92, + 687, + 265, + 700 + ], + "type": "text", + "content": "Table 8: Configuration for GRPO training." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 323, + 305, + 506, + 560 + ], + "lines": [ + { + "bbox": [ + 323, + 305, + 506, + 560 + ], + "spans": [ + { + "bbox": [ + 323, + 305, + 506, + 560 + ], + "type": "table", + "html": "
CategoryHyperparameter
Data Configuration
Train Batch Size512
Validation Batch Size128
Max Prompt Length1024
Max Response Length512
Optimization
Actor Learning Rate1e-6
Critic Learning Rate1e-5
PPO Mini Batch Size128
PPO Micro Batch Size8
KL Coefficient0.001
Rollout Configuration
Rollout Namevllm
GPU Memory Utilization0.3
Training & Logging
Critic Warmup Steps0
Save Frequency (Steps)15
Test Frequency (Steps)5
Total Epochs15
", + "image_path": "8e17cc4deaa8d819f6fc131f70f9940360284bd7649a786eba2c64bd6b7e3e7e.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 331, + 567, + 496, + 580 + ], + "lines": [ + { + "bbox": [ + 331, + 567, + 496, + 580 + ], + "spans": [ + { + "bbox": [ + 331, + 567, + 496, + 580 + ], + "type": "text", + "content": "Table 9: Configuration for PPO training." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 302, + 603, + 422, + 617 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 603, + 422, + 617 + ], + "spans": [ + { + "bbox": [ + 302, + 603, + 422, + 617 + ], + "type": "text", + "content": "C Additional Results" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 626, + 525, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 626, + 525, + 707 + ], + "spans": [ + { + "bbox": [ + 302, + 626, + 525, + 707 + ], + "type": "text", + "content": "We present additional results on three benchmarks, applying GRPO and PPO methods to models initialized with SFT on 4K data points. This setting serves as a \"theoretical\" upper bound, since the same 4K data is first used for SFT and subsequently reused for RL training." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 708, + 525, + 775 + ], + "type": "text", + "content": "The results are shown in Table 10, Table 11, and Table 12 for BFCL, API-Bank, and Bamboogle, respectively. We compare RL training initialized with models fine-tuned on either 400 or 4K SFT data points." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 103, + 68, + 489, + 221 + ], + "blocks": [ + { + "bbox": [ + 103, + 68, + 489, + 221 + ], + "lines": [ + { + "bbox": [ + 103, + 68, + 489, + 221 + ], + "spans": [ + { + "bbox": [ + 103, + 68, + 489, + 221 + ], + "type": "image", + "image_path": "86c8046a4208455b9d278a90cc615af285d011f3f28c85a707ecd1bb20d82e49.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 193, + 228, + 400, + 242 + ], + "lines": [ + { + "bbox": [ + 193, + 228, + 400, + 242 + ], + "spans": [ + { + "bbox": [ + 193, + 228, + 400, + 242 + ], + "type": "text", + "content": "Figure 12: The user prompt used for TIR's rollout." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 71, + 253, + 523, + 431 + ], + "blocks": [ + { + "bbox": [ + 71, + 253, + 523, + 431 + ], + "lines": [ + { + "bbox": [ + 71, + 253, + 523, + 431 + ], + "spans": [ + { + "bbox": [ + 71, + 253, + 523, + 431 + ], + "type": "table", + "html": "
ModelOverall AccNon-Live AST AccNon-Live Exec AccLive AccMulti Turn AccRelevance DetectionIrrelevance Detection
Qwen2.5-1.5B-Instruct (Raw)19.41%16.00%13.18%35.58%0.00%44.44%82.49%
Qwen2.5-1.5B-Instruct (SFT400+PPO)42.95%77.65%69.75%55.73%1.88%100.00%48.40%
Qwen2.5-1.5B-Instruct (SFT400+GRPO)40.93%70.54%60.79%56.33%1.00%94.44%58.63%
Qwen2.5-1.5B-Instruct (SFT4k+PPO)40.24%66.42%62.02%54.58%2.50%94.12%55.09%
Qwen2.5-1.5B-Instruct (SFT4k+GRPO)42.63%66.60%64.77%60.15%1.38%88.89%67.98%
Qwen2.5-3B-Instruct (Raw)33.04%42.52%40.80%53.96%1.00%64.71%56.01%
Qwen2.5-3B-Instruct (SFT400+PPO)45.80%78.29%71.09%58.76%5.12%94.12%54.70%
Qwen2.5-3B-Instruct (SFT400+GRPO)46.42%76.21%68.93%64.15%1.75%88.89%71.76%
Qwen2.5-3B-Instruct (SFT4k+PPO)48.22%77.75%73.18%64.27%5.25%94.12%66.41%
Qwen2.5-3B-Instruct (SFT4k+GRPO)47.82%75.12%69.52%68.19%2.38%77.78%76.16%
Qwen2.5-7B-Instruct (Raw)41.97%66.02%70.11%53.51%4.25%76.47%62.66%
Qwen2.5-7B-Instruct (SFT400+PPO)42.02%83.90%72.62%51.84%0.25%100%29.66%
Qwen2.5-7B-Instruct (SFT400+GRPO)39.25%80.69%74.34%46.51%0.25%100%14.19%
Qwen2.5-7B-Instruct (SFT4k+PPO)33.80%42.67%49.50%51.80%2.38%77.78%55.79%
Qwen2.5-7B-Instruct (SFT4k+GRPO)35.18%43.58%50.39%55.49%0.87%77.78%67.12%
Llama-3.2-3B-Instruct (Raw)22.09%17.44%14.57%43.85%0.00%77.78%66.07%
Llama-3.2-3B-Instruct (SFT400+PPO)41.62%68.10%69.88%52.98%3.00%94.12%56.29%
Llama-3.2-3B-Instruct (SFT400+GRPO)42.54%65.15%68.98%59.40%0.88%72.22%65.80%
Llama-3.2-3B-Instruct (SFT4k+PPO)45.41%73.71%68.46%62.27%2.50%82.35%68.75%
Llama-3.2-3B-Instruct (SFT4k+GRPO)45.50%70.69%67.70%64.73%1.00%77.78%78.85%
", + "image_path": "515ed892cbd40ad8d47a38bf66845b41c9ea2e093ff999e1bf328a5899c6054a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 71, + 463, + 302, + 645 + ], + "blocks": [ + { + "bbox": [ + 174, + 439, + 418, + 452 + ], + "lines": [ + { + "bbox": [ + 174, + 439, + 418, + 452 + ], + "spans": [ + { + "bbox": [ + 174, + 439, + 418, + 452 + ], + "type": "text", + "content": "Table 10: BFCL V3 Benchmark Results (Additional Result)" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 71, + 463, + 302, + 645 + ], + "lines": [ + { + "bbox": [ + 71, + 463, + 302, + 645 + ], + "spans": [ + { + "bbox": [ + 71, + 463, + 302, + 645 + ], + "type": "table", + "html": "
ModelOverall AccLevel 1Level 2Level 3
Qwen2.5-1.5B-Instruct (Raw)30.65%28.32%35.82%35.11%
Qwen2.5-1.5B-Instruct (SFT400+PPO)57.12%60.9%50.75%48.85%
Qwen2.5-1.5B-Instruct (SFT400+GRPO)61.31%64.16%58.21%54.20%
Qwen2.5-1.5B-Instruct (SFT4k+PPO)61.31%64.91%56.72%52.67%
Qwen2.5-1.5B-Instruct (SFT4k+GRPO)59.46%65.16%53.73%45.04%
Qwen2.5-3B-Instruct (Raw)51.59%59.65%32.84%36.64%
Qwen2.5-3B-Instruct (SFT400+PPO)65.16%67.92%55.22%61.83%
Qwen2.5-3B-Instruct (SFT400+GRPO)62.48%68.67%58.21%45.80%
Qwen2.5-3B-Instruct (SFT4k+PPO)60.13%64.41%44.78%54.96%
Qwen2.5-3B-Instruct (SFT4k+GRPO)60.80%64.41%56.72%51.91%
Qwen2.5-7B-Instruct (Raw)62.48%70.68%49.25%44.27%
Qwen2.5-7B-Instruct (SFT400+PPO)63.15%72.43%58.21%37.4%
Qwen2.5-7B-Instruct (SFT400+GRPO)54.10%61.40%52.24%32.82%
Qwen2.5-7B-Instruct (SFT4k+PPO)59.30%61.40%40.30%61.60%
Qwen2.5-7B-Instruct (SFT4k+GRPO)52.60%56.39%34.33%50.38%
Llama-3.2-3B-Instruct (Raw)40.54%44.86%29.85%32.82%
Llama-3.2-3B-Instruct (SFT400+PPO)57.79%63.16%47.76%46.56%
Llama-3.2-3B-Instruct (SFT400+GRPO)56.78%63.60%41.79%43.51%
Llama-3.2-3B-Instruct (SFT4k+PPO)54.10%60.65%40.30%41.22%
Llama-3.2-3B-Instruct (SFT4k+GRPO)50.92%59.15%34.33%34.35%
", + "image_path": "3b949f3b4313649aa4b382e0445d2ea3d6d9a666abd057e2de2228351c351419.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 318, + 463, + 524, + 645 + ], + "blocks": [ + { + "bbox": [ + 77, + 652, + 292, + 664 + ], + "lines": [ + { + "bbox": [ + 77, + 652, + 292, + 664 + ], + "spans": [ + { + "bbox": [ + 77, + 652, + 292, + 664 + ], + "type": "text", + "content": "Table 11: API-Bank Test Results (Additional Result)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 318, + 463, + 524, + 645 + ], + "lines": [ + { + "bbox": [ + 318, + 463, + 524, + 645 + ], + "spans": [ + { + "bbox": [ + 318, + 463, + 524, + 645 + ], + "type": "table", + "html": "
ModelAccuracyAvg Num Tool Call
Qwen2.5-1.5B-Instruct (Raw)20.8%0.61
Qwen2.5-1.5B-Instruct (SFT400+PPO)36.8%1.06
Qwen2.5-1.5B-Instruct (SFT400+GRPO)38.4%0.96
Qwen2.5-1.5B-Instruct (SFT4k+PPO)36.8%1.06
Qwen2.5-1.5B-Instruct (SFT4k+GRPO)34.4%1.02
Qwen2.5-3B-Instruct (Raw)52.0%1.77
Qwen2.5-3B-Instruct (SFT400+PPO)43.2%1.04
Qwen2.5-3B-Instruct (SFT400+GRPO)56.8%0.99
Qwen2.5-3B-Instruct (SFT4k+PPO)46.4%1.01
Qwen2.5-3B-Instruct (SFT4k+GRPO)47.2%0.98
Qwen2.5-7B-Instruct (Raw)69.6%1.42
Qwen2.5-7B-Instruct (SFT400+PPO)45.6%3.54
Qwen2.5-7B-Instruct (SFT400+GRPO)29.6%3.70
Qwen2.5-7B-Instruct (SFT4k+PPO)40.0%1.25
Qwen2.5-7B-Instruct (SFT4k+GRPO)32.0%1.25
Llama-3.2-3B-Instruct (Raw)34.4%1.25
Llama-3.2-3B-Instruct (SFT400+PPO)39.2%1.33
Llama-3.2-3B-Instruct (SFT400+GRPO)45.6%1.00
Llama-3.2-3B-Instruct (SFT4k+PPO)49.6%1.02
Llama-3.2-3B-Instruct (SFT4k+GRPO)42.4%1.03
", + "image_path": "e2378971efe8c48bc5aecbb8f0825cd5d3570bcbe7aef5b317bdc0a4649d542f.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 315, + 653, + 526, + 677 + ], + "lines": [ + { + "bbox": [ + 315, + 653, + 526, + 677 + ], + "spans": [ + { + "bbox": [ + 315, + 653, + 526, + 677 + ], + "type": "text", + "content": "Table 12: Bamboogle Test Results (Additional Result)" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 699, + 292, + 766 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 699, + 292, + 766 + ], + "spans": [ + { + "bbox": [ + 67, + 699, + 292, + 766 + ], + "type": "text", + "content": "Interestingly, our findings suggest that initializing from a model finetuned on 4K data does not consistently outperform initialization from a model finetuned on only 400 data points. In the BFCL benchmark, we even observe cases where perfor" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 699, + 526, + 767 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 699, + 526, + 767 + ], + "spans": [ + { + "bbox": [ + 302, + 699, + 526, + 767 + ], + "type": "text", + "content": "mance drops below that of the raw instruct model. This counterintuitive result may stem from overfitting during the SFT phase, which could restrict the model's ability to explore during RL training and lead to poorer generalization on held-out tasks." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 18 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file